package geppetto.cat.constrains;

import geppetto.phraseHMM.IBMM1;
import geppetto.phraseHMM.Trellis;

/**
 * Implements the bijectivity constraints.
 * We have one constraint for each source word.
 * According to the dual, which we are minimizing,
 * the x value of the line search is lambda, which we initialize to zero.
 * 
 *  We use a slack which corresponds to the epsilon of formula 6 of
 *  Altun and Smola.
 *  
 * @author javg
 *
 */
public class BijectivityConstrains extends ProjectionConstrains{

	/** Likelihood of the sentence under the original (unconstrained) posteriors. */
	double originalLikelihood = 0;
	/** Underlying alignment model whose posteriors we are projecting. */
	IBMM1 model;
	/**
	 * Deep copy of the model's original probability cache. updateModel always
	 * rescales these pristine values, never the (already scaled) live cache,
	 * so the constrained probabilities can be recomputed from scratch.
	 */
	Trellis origProbCache;
	/** Likelihood after the most recent lambda update. */
	double newLikelihood = 0;
	/** Right-hand side of the constraints: we enforce E[f_i] - b[i] &lt;= 0. */
	double[] b;

	/** @return the index of the sentence currently held by the model. */
	public int getSentenceNumber(){
		return model._sentenceNumber;
	}

	/**
	 * Sets up the constrained projection for the sentence currently loaded in
	 * {@code model}. Lambda starts at zero, so the initial constrained
	 * distribution equals the original one.
	 *
	 * @param model the alignment model whose posteriors are projected
	 * @param epsilon optimization precision (gradient-norm stopping threshold)
	 * @param slack epsilon of formula 6 of Altun and Smola (L2 slack weight)
	 * @param maxStep maximum line-search step size
	 * @param maxNumberIterations cap on projection iterations
	 * @param b constraint right-hand sides, one per source word
	 */
	public BijectivityConstrains(IBMM1 model, double epsilon, double slack, double maxStep, int maxNumberIterations, double[] b){
		this.model = model;
		this.optimizationPrecision = epsilon;
		this.slack = slack;
		sSize = model._sourceSentenceIDS.length;
		fSize = model._foreignSentenceIDS.length;
		lambda = new Trellis(sSize, 1);
		lambda.clear(0);
		// Snapshot the untouched probabilities before any rescaling happens.
		origProbCache = model._probCache.deppCopy();
		originalLikelihood = model.getRealLikelihood();
		newLikelihood = originalLikelihood;

		this.maxStep = maxStep;
		_maxNumberIterations = maxNumberIterations;
		this.b = b;
	}

	/** @return the total number of training iterations configured on the model. */
	public int getModelMaxTrainingIteration(){
		return model._numberTrainingIterations;
	}

	/** @return the model's current training iteration. */
	public int getModelTrainingIteration(){
		return model._currentTrainingIteration;
	}

	@Override
	public double getOriginalLikelihood() {
		return originalLikelihood;
	}

	/** @return the number of dual parameters (one lambda per source word). */
	public int getNumParamters(){
		return sSize;
	}

	/**
	 * Copies the current objective, likelihoods and lambda into {@code stats}.
	 * Note: stores a reference to the live lambda Trellis, not a copy.
	 */
	public void updateStats(SentenceConstrainedProjectionStats stats){
		stats.objective = getObjective();
		stats.likelihood = model.getRealLikelihood();
		stats.originalLikelihood = originalLikelihood;
		stats.sentenceNumber = model._sentenceNumber;
		stats.lambda = lambda;
	}

	/**
	 * Update the lambdas with projection. If a stepped value goes below 0 it
	 * is clamped to zero (projection onto the non-negative orthant). Lambda
	 * has to be non-negative since we want E[f] - b &lt; 0.
	 * Note: the getStepSize code requires that we create a new
	 * Trellis for this.lambda rather than overwriting contents of this.lambda.
	 *
	 * @param ascentDirection direction of the step
	 * @param stepSize scalar step along the direction
	 * @param stats accumulator for forward-backward call counts etc.
	 */
	public void updateLambda(Trellis ascentDirection, double stepSize, SentenceConstrainedProjectionStats stats){
		lambdaNorm = 0;
		Trellis newLambda = new Trellis(sSize, 1);
		for(int i = 0; i < sSize; i++){
			double nli = lambda.getProb(i, 0) + stepSize*ascentDirection.getProb(i, 0);
			if(nli <= 0){
				nli = 0;
			}
			newLambda.setProb(i, 0, nli);
			lambdaNorm += nli*nli;
		}
		lambdaNorm = Math.sqrt(lambdaNorm);
		lambda = newLambda;
		updateModel(stats);
	}

	/**
	 * Replaces the current lambda with a (clamped to non-negative) copy of
	 * {@code setLambdaTo}, recomputes lambdaNorm, and refreshes the model
	 * posteriors. A fresh Trellis is installed (see note on updateLambda).
	 *
	 * @param setLambdaTo desired lambda values (negatives are clamped to 0)
	 * @param stats accumulator for forward-backward call counts etc.
	 */
	public void setLambda(Trellis setLambdaTo, SentenceConstrainedProjectionStats stats){
		lambdaNorm = 0;
		Trellis newLambda = new Trellis(sSize, 1);
		for(int i = 0; i < sSize; i++){
			double nli = setLambdaTo.getProb(i, 0);
			if(nli <= 0){
				nli = 0;
			}
			// BUG FIX: the clamped value must be written into newLambda, which
			// becomes this.lambda below. The previous code wrote it into the
			// old lambda and then discarded it by installing the empty
			// newLambda, so setLambda effectively zeroed all the lambdas.
			newLambda.setProb(i, 0, nli);
			lambdaNorm += nli*nli;
		}
		lambdaNorm = Math.sqrt(lambdaNorm);
		this.lambda = newLambda;
		updateModel(stats);
	}

	/**
	 * Returns the L2 norm of the (projected) gradient; the caller exits when
	 * it falls below the optimization precision. In the projected-gradient
	 * case, if an entry of lambda is zero the corresponding gradient entry
	 * has already been clipped by getGradient, so a small norm here means no
	 * feasible ascent direction remains.
	 *
	 * @param gradient the (already projected) gradient
	 * @return its Euclidean norm
	 */
	public double gradientNorm(Trellis gradient){
		double norm = 0;
		for(int i = 0; i < sSize; i++){
			double gi = gradient.getProb(i, 0);
			norm += gi*gi;
		}
		return Math.sqrt(norm);
	}

	/**
	 * Given the current lambdas, updates the model posteriors: each
	 * (foreign, source) probability is the ORIGINAL probability scaled by
	 * exp(-lambda_c); null-alignment probabilities are restored unscaled.
	 * NOTE lambda is always non-negative.
	 * Runs forward-backward afterwards and refreshes newLikelihood.
	 *
	 * @param stats accumulator; its forward-backward counter is incremented
	 */
	public void updateModel(SentenceConstrainedProjectionStats stats){
		for (int c = 0; c < sSize; c++) {
			double expC = Math.exp(-lambda.getProb(c, 0));
			for (int fi = 0; fi < fSize; fi++) {
				double newValue = origProbCache.getProb(fi, c)*expC;
				model._probCache.setProb(fi, c, newValue);
			}
		}
		// Restore the null-alignment column (index sSize) unscaled.
		for (int fi = 0; fi < fSize; fi++) {
			model._probCache.setProb(fi, sSize, origProbCache.getProb(fi, sSize));
		}
		// Make forward backward etc.
		model.makePosteriors(sSize, fSize);
		newLikelihood = model.getRealLikelihood();
		stats.fbcall++;
	}

	/**
	 * For steepest descent the ascent direction is the gradient itself.
	 *
	 * @param gradient the current gradient
	 * @return the same Trellis, unmodified
	 */
	public Trellis getAscentDirection(Trellis gradient){
		return gradient;
	}

	/**
	 * Finite-difference gradient of the objective with respect to lambda,
	 * intended as a debugging check against getGradient. Perturbs each
	 * coordinate by 1e-8, measures the objective change, then restores the
	 * original lambda (and posteriors) before moving to the next coordinate.
	 *
	 * @return the numerical gradient
	 */
	public Trellis getNumericalGradient(){
		Trellis gradient = new Trellis(sSize, 1);
		Trellis oldLambda = lambda.deppCopy();
		double oldObjective = getObjective();
		double epsilon = 1e-8;
		for (int j = 0; j < sSize; j++) {
			SentenceConstrainedProjectionStats stats = new SentenceConstrainedProjectionStats(sSize, fSize, model._sentenceNumber);
			lambda.setProb(j, 0, oldLambda.getProb(j, 0) + epsilon);
			setLambda(lambda, stats);
			gradient.setProb(j, 0, (getObjective() - oldObjective)/epsilon);
			setLambda(oldLambda, stats);
			updateModel(stats);
		}
		return gradient;
	}

	/**
	 * Analytic (projected) gradient. For each source word:
	 *   gradient_si = -b[si] + sum_f statePosteriors(f, si)
	 * minus the slack term slack*lambda_si/||lambda|| when lambda is nonzero.
	 * Note that the posteriors used here are expectations under the current
	 * constrained distribution, so they change with each iteration.
	 * Negative entries are clipped to zero (projection: lambda cannot be
	 * decreased below zero, so a negative component is not a usable
	 * ascent direction there).
	 *
	 * @return the projected gradient, one entry per source word
	 */
	public Trellis getGradient(){
		Trellis gradient = new Trellis(sSize, 1);
		for(int si = 0; si < sSize; si++){
			double gradientsi = -b[si];
			for(int fi = 0; fi < fSize; fi++){
				gradientsi += model._statePosteriors.getProb(fi, si);
			}
			// Add the slack (L2 regularizer) term; skip when lambda == 0 to
			// avoid dividing by a zero norm.
			if(lambdaNorm != 0){
				gradientsi -= slack*lambda.getProb(si, 0)/lambdaNorm;
			}
			if(gradientsi < 0){
				gradient.setProb(si, 0, 0);
			}else{
				gradient.setProb(si, 0, gradientsi);
			}
		}
		return gradient;
	}

	/**
	 * TODO: Check if this is the correct value.
	 * objective: -lambda*b + log(sum_z p(z|x) * exp(-lambda * f(z,x))) + l2Norm
	 *  = -lambda*b + log(sum_z p(z,x) * exp(-lambda * f(z,x))) - log(sum_z p~) + l2Norm
	 *  = -lambda*b + log(likelihood Q) - loglikelihood p~ + l2Norm
	 *
	 * @return the current dual objective value
	 */
	public double getObjective(){
		double objective = 0;
		for(int c = 0; c < sSize; c++){
			objective -= lambda.getProb(c, 0)*b[c];
		}
		objective -= Math.log(newLikelihood);
		objective += Math.log(originalLikelihood);
		// Adding slack
		objective -= slack*lambdaNorm;
		return objective;
	}

	/**
	 * Dot product over the first column of two trellises, counting only
	 * positions where BOTH entries are strictly positive (matching the
	 * projected-gradient convention used above).
	 *
	 * @param a first trellis (its _nrPositions bounds the loop)
	 * @param b second trellis
	 * @return the restricted dot product
	 */
	public double dotProduct(Trellis a, Trellis b){
		double dot = 0;
		for(int i = 0; i < a._nrPositions; i++){
			double ai = a.getProb(i, 0);
			double bi = b.getProb(i, 0);
			if(ai > 0 && bi > 0)
				dot += ai*bi;
		}
		return dot;
	}
}
