/*    Copyright 2010 Tobias Marschall
 *
 *    This file is part of MoSDi.
 *
 *    MoSDi is free software: you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation, either version 3 of the License, or
 *    (at your option) any later version.
 *
 *    MoSDi is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with MoSDi.  If not, see <http://www.gnu.org/licenses/>.
 */

package mosdi.paa;

import java.util.Objects;

import mosdi.fa.FiniteMemoryTextModel;

/**
 * A probabilistic arithmetic automaton (PAA) obtained by combining a
 * deterministic arithmetic automaton (DAA) with a finite-memory text model:
 * the PAA's state space is the product of the DAA's states and the text
 * model's states (realized by {@link ProductMarkovChain}), while values,
 * operations, and emissions are delegated to the DAA.
 *
 * <p>Emissions are deterministic per state (probability 1 for the DAA's
 * emission, 0 otherwise), hence this class implements
 * {@link DeterministicEmitter}.
 */
public class TextBasedPAA extends PAA implements DeterministicEmitter {

	/** Product of DAA states and text-model states; provides transition structure. */
	private final ProductMarkovChain markovChain;
	/** Underlying DAA; provides values, operations, and emissions. */
	private final DAA daa;

	/**
	 * Creates a text-model-based PAA over the product of the given automaton
	 * and text model.
	 *
	 * @param daa deterministic arithmetic automaton to be run on the text model
	 * @param textModel finite-memory text model providing character probabilities
	 * @throws NullPointerException if {@code daa} or {@code textModel} is null
	 */
	public TextBasedPAA(DAA daa, FiniteMemoryTextModel textModel) {
		this.daa = Objects.requireNonNull(daa, "daa");
		this.markovChain = new ProductMarkovChain(daa,
				Objects.requireNonNull(textModel, "textModel"));
	}

	/** Returns the number of states of the product Markov chain. */
	@Override
	public int getStateCount() {
		return markovChain.getStateCount();
	}

	/** Returns the transition probability between two product-chain states. */
	@Override
	public double transitionProbability(int state, int targetState) {
		return markovChain.getTransitionProbability(state, targetState);
	}

	/**
	 * Applies the DAA's operation associated with the automaton component of
	 * the given product state to the given value and emission.
	 */
	@Override
	public int performOperation(int state, int value, int emission) {
		return daa.performOperation(markovChain.getAutomatonState(state), value, emission);
	}

	/** Returns the states reachable from {@code state} in one step. */
	@Override
	protected int[] getTargets(int state) {
		return markovChain.getTargets(state);
	}

	/** Returns probabilities parallel to {@link #getTargets(int)}. */
	@Override
	protected double[] getTargetProbabilities(int state) {
		return markovChain.getTargetProbabilities(state);
	}

	/**
	 * Returns the joint start distribution over (state, value) pairs: each
	 * state carries its chain's initial probability, concentrated entirely on
	 * the DAA's start value.
	 */
	@Override
	public double[][] stateValueStartDistribution() {
		double[][] result = new double[markovChain.getStateCount()][getValueCount()];
		for (int state = 0; state < markovChain.getStateCount(); ++state) {
			result[state][getStartValue()] = markovChain.getInitialProbability(state);
		}
		return result;
	}

	/**
	 * Returns 1.0 if {@code emission} is the (deterministic) emission of the
	 * given state, 0.0 otherwise.
	 */
	@Override
	public double emissionProbability(int state, int emission) {
		return getEmission(state) == emission ? 1.0 : 0.0;
	}

	/** Returns the DAA emission for the automaton component of the given state. */
	@Override
	public int getEmission(int state) {
		return daa.getEmission(markovChain.getAutomatonState(state));
	}

	/** Returns the number of distinct emissions of the underlying DAA. */
	@Override
	public int getEmissionCount() { return daa.getEmissionCount(); }

	/**
	 * Not supported: the product chain has a start <em>distribution</em> over
	 * states (see {@link #stateValueStartDistribution()}), not a single start
	 * state.
	 *
	 * @throws UnsupportedOperationException always
	 */
	@Override
	public int getStartState() { throw new UnsupportedOperationException(); }

	/** Returns the DAA's start value. */
	@Override
	public int getStartValue() { return daa.getStartValue(); }

	/** Returns the number of distinct values of the underlying DAA. */
	@Override
	public int getValueCount() { return daa.getValueCount(); }

	/** Returns the DAA's operation object. */
	@Override
	public SimpleOperation getOperation() {
		return daa.getOperation();
	}

}
