package geppetto.cat.constrains;

import geppetto.cat.common.ContinuosDistributions;
import geppetto.cat.common.GaussianDistribution;
import geppetto.phraseHMM.IBMM1;
import geppetto.phraseHMM.Trellis;

/**
 * Implements the bijectivity constraints.
 * We have one constraint for each source word.
 * According to the dual which we are minimizing,
 * the x value of the line search is lambda, which we initialize to zero.
 *
 * We use a slack which corresponds to the epsilon of formula 6 of
 * Altun and Smola.
 *
 * @author javg
 *
 */
public class EqualityStochasticFertilityConstrains extends ProjectionConstrains{

	/** Model likelihood before any projection was applied (cached at construction). */
	double originalLikelihood = 0;
	/** The alignment model whose posteriors are being projected. */
	IBMM1 model;
	/** Deep copy of the model's original translation probabilities, used as the base for re-scaling. */
	Trellis origProbCache;
	/** Likelihood after the most recent call to {@link #updateModel}. */
	double newLikelihood = 0;

	// Per-source-word fertility distributions; getGradient/getObjective assume
	// each entry is a GaussianDistribution (they cast unconditionally).
	ContinuosDistributions[] _dist;

	/** @return the index of the sentence the wrapped model is currently processing. */
	public int getSentenceNumber(){
		return model._sentenceNumber;
	}

	/**
	 * Sets up the projection for one sentence pair.
	 * Caches the model's unprojected probabilities and likelihood, and
	 * initializes lambda (one dual variable per source word) to zero.
	 *
	 * @param model               alignment model to project
	 * @param epsilon             optimization precision (gradient-norm stopping threshold)
	 * @param slack               slack corresponding to epsilon of formula 6 of Altun and Smola
	 * @param maxStep             maximum line-search step size
	 * @param maxNumberIterations maximum number of projection iterations
	 * @param dist                per-source-word fertility distributions (Gaussian)
	 */
	public EqualityStochasticFertilityConstrains(IBMM1 model,double epsilon, double slack, double maxStep,
			int maxNumberIterations, ContinuosDistributions[] dist){
		this.model = model;
		this.optimizationPrecision = epsilon;
		this.slack = slack;
		sSize = model._sourceSentenceIDS.length;
		fSize = model._foreignSentenceIDS.length;
		lambda = new Trellis(sSize,1);
		lambda.clear(0);
		origProbCache = model._probCache.deppCopy();
		originalLikelihood = model.getRealLikelihood();
		newLikelihood = originalLikelihood;
		this.maxStep = maxStep;
		_maxNumberIterations = maxNumberIterations;
		_dist = dist;
	}

	/** @return the model's configured maximum number of training iterations. */
	public int getModelMaxTrainingIteration(){
		return model._numberTrainingIterations;
	}

	/** @return the model's current training iteration. */
	public int getModelTrainingIteration(){
		return model._currentTrainingIteration;
	}

	/** @return the number of dual parameters: one lambda per source word. */
	public int getNumParamters(){
		return sSize;
	}

	/**
	 * Copies the current projection state (objective, likelihoods, sentence
	 * number and lambda) into the given stats object.
	 */
	public  void updateStats(SentenceConstrainedProjectionStats stats){
		stats.objective = getObjective();
		stats.likelihood = model.getRealLikelihood();
		stats.originalLikelihood = originalLikelihood;
		stats.sentenceNumber = model._sentenceNumber;
		stats.lambda = lambda;
	}

	/**
	 * Update the lambdas with projection. If the value of x gets smaller than 0 it
	 * is set to zero for projection. Lambda has to be bigger than zero since
	 * we want E[f] - b &lt; 0.
	 * Note: the getStepSize code requires that we create a new
	 * Trellis for this.lambda rather than overwriting contents of this.lambda.
	 * @param ascentDirection direction of the step (the gradient for steepest descent)
	 * @param stepSize        scalar step length
	 * @param stats           accumulator for projection statistics
	 */
	public void updateLambda(Trellis ascentDirection, double stepSize, SentenceConstrainedProjectionStats stats){
		lambdaNorm = 0;
		Trellis newLambda = new Trellis(sSize,1);
		for(int i =0; i < sSize; i++){
			double nli = lambda.getProb(i, 0) + stepSize*ascentDirection.getProb(i, 0);
			newLambda.setProb(i, 0, nli);
			lambdaNorm += nli*nli;
		}
		lambdaNorm = Math.sqrt(lambdaNorm);
		lambda=newLambda;
		updateModel(stats);
	}

	/**
	 * Replaces lambda with a copy of the given values, recomputes its norm,
	 * and updates the model posteriors accordingly.
	 * As in {@link #updateLambda}, a fresh Trellis is created rather than
	 * overwriting this.lambda in place (the getStepSize code requires it).
	 *
	 * @param setLambdaTo new lambda values (copied, not aliased)
	 * @param stats       accumulator for projection statistics
	 */
	public void  setLambda(Trellis setLambdaTo, SentenceConstrainedProjectionStats stats){
		lambdaNorm = 0;
		Trellis newLambda = new Trellis(sSize,1);
		for(int i =0; i < sSize; i++){
			double nli = setLambdaTo.getProb(i,0);
			// BUG FIX: previously this wrote into the old this.lambda and then replaced
			// it with the freshly allocated (unpopulated) newLambda, discarding every
			// value. The copy must go into newLambda.
			newLambda.setProb(i, 0, nli);
			lambdaNorm += nli*nli;
		}
		lambdaNorm = Math.sqrt(lambdaNorm);
		this.lambda = newLambda;
		updateModel(stats);
	}

	/**
	 * Returns the Euclidean norm of the gradient; the caller exits the
	 * optimization when this falls below epsilon.
	 * In the case of projected gradient we would also need to check that
	 * when an entry of lambda is zero the gradient for that position is
	 * non-negative; otherwise it can have any value.
	 *
	 * JOAO: Removed constraint that gradient need to be zero
	 * @param gradient current gradient of the dual objective
	 * @return L2 norm of the gradient
	 */
	public double gradientNorm(Trellis gradient){
		double norm = 0;
		for(int i = 0; i < sSize; i++){
			double gi = gradient.getProb(i,0);
			norm+= gi*gi;
		}
		return Math.sqrt(norm);
	}

	/**
	 * Given the current lambdas it updates the model posteriors:
	 * each non-null translation probability is re-scaled by exp(-lambda_c),
	 * the null column is restored unscaled, and forward-backward is re-run.
	 * NOTE: lambda is always bigger than zero.
	 *
	 * @param stats accumulator; its forward-backward call count is incremented
	 */
	public void updateModel(SentenceConstrainedProjectionStats stats){
		for (int c = 0; c < sSize; c++) {
			double expC = Math.exp(-lambda.getProb(c, 0));
			for (int fi = 0; fi < fSize; fi++) {
				double newValue = origProbCache.getProb(fi, c)*expC;
				model._probCache.setProb(fi, c, newValue);
			}
		}
		//Restore the null-word column (index sSize) unscaled: lambdas only
		//constrain real source words.
		for (int fi = 0; fi < fSize; fi++) {
			model._probCache.setProb(fi, sSize, origProbCache.getProb(fi, sSize));
		}
		//Make forward backward etc.
		model.makePosteriors(sSize, fSize);
		newLikelihood = model.getRealLikelihood();
		stats.fbcall++;
	}


	/**
	 * For steepest descent the direction is the gradient itself.
	 * @param gradient current gradient
	 * @return the ascent direction (the gradient, unmodified)
	 */
	public Trellis getAscentDirection(Trellis gradient){
		return gradient;
	}

	/**
	 * Computes a finite-difference approximation of the gradient, one
	 * coordinate at a time, for debugging {@link #getGradient}.
	 * Perturbs each lambda by epsilon, measures the objective change,
	 * then restores the original lambda.
	 *
	 * @return forward-difference estimate of the gradient
	 */
	public Trellis getNumericalGradient(){
		Trellis gradient = new Trellis(sSize,1);
		Trellis oldLambda = lambda.deppCopy();
		double oldObjective = getObjective();
		double epsilon = 1e-8;
		for (int j = 0; j < sSize; j++) {
				SentenceConstrainedProjectionStats stats =  new SentenceConstrainedProjectionStats(sSize,fSize,model._sentenceNumber);
				lambda.setProb(j, 0, oldLambda.getProb(j,0)+epsilon);
				setLambda(lambda, stats);
				gradient.setProb(j, 0, (getObjective() - oldObjective)/epsilon);
				// setLambda already calls updateModel; the previous extra
				// updateModel call here ran forward-backward twice per coordinate.
				setLambda(oldLambda, stats);
		}
		return gradient;
	}

	/**
	 * Analytic gradient of the dual objective: for each source word si,
	 * grad_si = E[fertility of si] - mean_si - lambda_si * variance_si,
	 * where the expected fertility is the sum of state posteriors over
	 * all foreign positions.
	 *
	 * @return gradient Trellis of size sSize x 1
	 */
	public Trellis getGradient(){
		Trellis gradient = new Trellis(sSize,1);
		for(int si = 0; si < sSize; si++){
			double variance = ((GaussianDistribution)_dist[si]).variance;
			double mean = ((GaussianDistribution)_dist[si]).mean;
			double lambdai = lambda.getProb(si,0);
			double gradientsi=0;
			for(int fi = 0; fi < fSize; fi++){
				gradientsi += model._statePosteriors.getProb(fi, si);
			}
			gradientsi -= mean;
			gradientsi -= lambdai*variance;
			gradient.setProb(si, 0, gradientsi);
		}
		return gradient;
	}

	@Override
	public double getOriginalLikelihood() {
		return originalLikelihood;
	}

	/**
	 * Dual objective (see derivation paper):
	 * log(originalLikelihood) - log(newLikelihood)
	 * + sum_c [ -lambda_c^2 * variance_c / 2 - lambda_c * mean_c - negLogNorm_c ].
	 *
	 * @return current value of the dual objective
	 */
	public double getObjective(){
		double objective = 0;
		//-log(Z)
		objective -= Math.log(newLikelihood);
		objective += Math.log(originalLikelihood);
		for(int c = 0; c < sSize; c++){
			double variance = ((GaussianDistribution)_dist[c]).variance;
			double mean = ((GaussianDistribution)_dist[c]).mean;
			double lambdai = lambda.getProb(c,0);
			objective+= -(lambdai*lambdai)*(variance)*1/2 - lambdai*mean - ((GaussianDistribution)_dist[c]).negLogNorm;
		}
		return objective;
	}

	/**
	 * Dot product of the first column of two Trellises, over the positions
	 * of {@code a}.
	 *
	 * @param a first vector (determines the number of positions)
	 * @param b second vector
	 * @return sum over i of a[i,0] * b[i,0]
	 */
	public  double dotProduct(Trellis a, Trellis b){
		double dot =0;
		for(int i =0; i < a._nrPositions ; i++){
			double ai = a.getProb(i, 0);
			double bi = b.getProb(i, 0);
			dot += ai*bi;
		}
		return dot;
	}
}
