package geppetto.cat.constrains;

import geppetto.phraseHMM.IBMM1;
import geppetto.phraseHMM.Trellis;


/**
 * The objective we minimize is:
 * log sum_z q~
 * where q~ is p(z|x)*exp(lambda*feature)
 * 
 * The gradient is: E[q]
 * @author javg
 *
 */
public class SymmetryConstrains extends ProjectionConstrains{
 	
 	// We have one constraint for each (foreign word, source word) pair.
	IBMM1 forward;
	IBMM1 backward;
	// Likelihoods of the unprojected models, fixed at construction time.
	public double forwardOriginalLikelihood = 0;
 	public double backwardOriginalLikelihood = 0;
	// Likelihoods after the most recent updateModel() call.
	public double forwardNewLikelihood = 0;
	public double backwardNewLikelihood = 0;
	// Deep copies of the models' probability caches taken at construction.
	// updateModel() always rescales these pristine originals, never the
	// (already mutated) caches inside the models themselves.
	Trellis origProbCacheForward;
	Trellis origProbCacheBackward;
	
	
	
	public int getSentenceNumber(){
		return forward._sentenceNumber;
	}
	
	/**
	 * Assume that forward and backward have been initialized with the current 
	 * sentence (recall that they have internal state relevant to the current sentence).
	 * @param forward  source-to-foreign alignment model
	 * @param backward foreign-to-source alignment model
	 * @param epsilon  optimization precision (stopping tolerance)
	 * @param slack    weight of the slack (L2 penalty on lambda) term
	 * @param maxStep  maximum line-search step size
	 * @param maxNumberIterations maximum number of projection iterations
	 */
	public SymmetryConstrains(IBMM1 forward, IBMM1 backward, double epsilon, double slack, double maxStep, int maxNumberIterations){
		this.forward = forward;
		this.backward = backward;
		this.optimizationPrecision = epsilon;
		this.slack = slack;
		sSize = forward._sourceSentenceIDS.length;
		fSize = forward._foreignSentenceIDS.length;
		
		lambda = new Trellis(fSize,sSize);
		origProbCacheForward = forward._probCache.deppCopy();
		origProbCacheBackward = backward._probCache.deppCopy();
		//Likelihood before starting the projections
		forwardOriginalLikelihood = forward.getRealLikelihood();
		backwardOriginalLikelihood = backward.getRealLikelihood();
		forwardNewLikelihood = forwardOriginalLikelihood;
		backwardNewLikelihood = backwardOriginalLikelihood;
		_maxNumberIterations = maxNumberIterations;
		this.maxStep = maxStep;
	}
	
	/** Number of dual parameters: one lambda per (foreign, source) word pair. */
	public int getNumParamters(){
		return fSize*sSize;
	}
	
	public int getModelTrainingIteration(){
		return forward._currentTrainingIteration;
	}
	
	public int getModelMaxTrainingIteration(){
		return forward._numberTrainingIterations;
	}
	
	@Override
	public double getOriginalLikelihood() {
		// Average of the two directional likelihoods before projection.
		return (backwardOriginalLikelihood+forwardOriginalLikelihood)/2;
	}
	
	
	
	/** Copies the current objective/likelihood values into the stats object. */
	public void updateStats(SentenceConstrainedProjectionStats stats) {
		stats.objective = getObjective();
		stats.likelihood = (forwardNewLikelihood+backwardNewLikelihood)/2;
		stats.originalLikelihood = (forwardOriginalLikelihood+backwardOriginalLikelihood)/2;
		stats.sentenceNumber = forward._sentenceNumber;
	}
	
	
	/**
	 * Update the lambdas: lambda &lt;- lambda + stepSize * ascentDirection,
	 * recompute lambdaNorm, and refresh the model posteriors.
	 * Note: the getStepSize code requires that we create a new 
	 * Trellis for this.lambda rather than overwriting contents of this.lambda. 
	 * @param ascentDirection direction to move the dual variables in
	 * @param stepSize        scalar step along ascentDirection
	 * @param stats           accumulator for forward-backward call counts
	 * @throws RuntimeException if any updated lambda is NaN
	 */
	public void updateLambda(Trellis ascentDirection, double stepSize, SentenceConstrainedProjectionStats stats){
		lambdaNorm = 0;
		Trellis newLambda = new Trellis(fSize,sSize);
		for(int si = 0; si < sSize; si++){
			for(int fi = 0; fi < fSize; fi++){
				double newLambdai = lambda.getProb(fi, si) + stepSize*ascentDirection.getProb(fi, si);
				if(Double.isNaN(newLambdai)){
					System.out.println("Lambda is not a number step " + stepSize);
					System.out.println("old prob " + lambda.getProb(fi, si));
					ascentDirection.print(fSize, sSize);
					throw new RuntimeException();
				}
				newLambda.setProb(fi, si, newLambdai);
				lambdaNorm += newLambdai*newLambdai;
			}
		}
		lambdaNorm = Math.sqrt(lambdaNorm);
		lambda = newLambda;
		updateModel(stats);
	}

	/**
	 * Replace the lambdas with a copy of the given trellis, recompute
	 * lambdaNorm, and refresh the model posteriors.
	 * Note: as in updateLambda, a fresh Trellis is created for this.lambda
	 * rather than overwriting its contents (required by the getStepSize code).
	 * @param setLambdaTo the new dual values (copied, never aliased)
	 * @param stats       accumulator for forward-backward call counts
	 */
	public void setLambda(Trellis setLambdaTo, SentenceConstrainedProjectionStats stats){
		lambdaNorm = 0;
		Trellis newLambda = new Trellis(fSize,sSize);
		for(int si = 0; si < sSize; si++){
			for(int fi = 0; fi < fSize; fi++){
				double newLambdai = setLambdaTo.getProb(fi, si);
				newLambda.setProb(fi, si, newLambdai);
				lambdaNorm += newLambdai*newLambdai;
			}
		}
		lambdaNorm = Math.sqrt(lambdaNorm);
		lambda = newLambda;
		updateModel(stats);
	}

	
	/**
	 * Euclidean (L2) norm of the given gradient trellis.
	 * @param gradient trellis to take the norm of
	 * @return sqrt(gradient . gradient)
	 */
	public final double gradientNorm(Trellis gradient){
		return Math.sqrt(dotProduct(gradient,gradient));
	}
	
	/**
	 * Given the current lambdas, updates the model posteriors:
	 * forward probabilities are scaled by exp(-lambda), backward ones by
	 * exp(+lambda), null-alignment entries keep their original probabilities,
	 * and the posteriors and likelihoods of both models are recomputed.
	 * @param stats accumulator; fbcall is incremented once per call
	 */
	public void updateModel(SentenceConstrainedProjectionStats stats){
		for (int si = 0; si < sSize; si++) {
			for (int fi = 0; fi < fSize; fi++) {
				double prob = lambda.getProb(fi, si);
				double forwardExpC = Math.exp(-prob);
				double backwardExpC = Math.exp(+prob);
				forward._probCache.setProb(fi, si, origProbCacheForward.getProb(fi, si)*forwardExpC);
				backward._probCache.setProb(si, fi, origProbCacheBackward.getProb(si, fi)*backwardExpC);
			}
		}
		// Need to fill in the values for the null word (index sSize/fSize):
		// null alignments carry no constraint, so they keep the original probs.
		for (int si = 0; si < sSize; si++) {
			backward._probCache.setProb(si, fSize, origProbCacheBackward.getProb(si, fSize));
		}
		for (int fi = 0; fi < fSize; fi++) {
			forward._probCache.setProb(fi, sSize, origProbCacheForward.getProb(fi, sSize));
		}
		
		forward.makePosteriors(sSize, fSize);
		forwardNewLikelihood = forward.getRealLikelihood();
		backward.makePosteriors(fSize, sSize);
		backwardNewLikelihood = backward.getRealLikelihood();
		stats.fbcall++;
		
		// NOTE(review): a disabled backoff existed here that restored the
		// original probabilities when a likelihood underflowed (< 1e-50).
		// Its condition tested forward.getRealLikelihood() twice instead of
		// forward AND backward - fix that if the backoff is ever revived.
	}
	

	/**
	 * Returns the ascent direction for the line search. Here it is simply
	 * the gradient itself, returned unmodified (any sign handling is done
	 * by the caller / in getGradient).
	 * @param gradient current gradient
	 * @return the same trellis, unchanged
	 */
	public Trellis getAscentDirection(Trellis gradient){
		return gradient;
	}
	
	
	/**
	 * Finite-difference check of the analytic gradient: perturbs each lambda
	 * by epsilon, recomputes the objective via a full model update, and uses
	 * the forward difference (f(x+eps) - f(x)) / eps. Restores the original
	 * lambdas (and model state) before returning.
	 * Debugging aid only: costs O(fSize*sSize) forward-backward passes.
	 * @return numerically estimated gradient trellis
	 */
	public Trellis getNumericalGradient(){
		Trellis gradient = new Trellis(fSize,sSize);
		Trellis oldLambda = lambda.deppCopy();
		double oldObjective = getObjective();
				
		double epsilon = 1e-10;
		for (int i = 0; i < fSize; i++) {
			for (int j = 0; j < sSize; j++) {
				lambda.setProb(i, j, oldLambda.getProb(i, j)+epsilon);
				setLambda(lambda, new SentenceConstrainedProjectionStats(sSize,fSize,forward._sentenceNumber));
				gradient.setProb(i, j, (getObjective()-oldObjective)/epsilon);
				lambda.setProb(i, j, oldLambda.getProb(i, j));
			}
		}
		// Restore the pre-perturbation lambdas and model posteriors.
		setLambda(oldLambda, new SentenceConstrainedProjectionStats(sSize,fSize,forward._sentenceNumber));
		return gradient;
	}

	
	/**
	 * Analytic gradient of the objective w.r.t. lambda: for each word pair,
	 * the likelihood-ratio-weighted difference of the two models' state
	 * posteriors (normalized by the summed ratios), minus the slack
	 * sub-gradient slack * lambda / ||lambda||.
	 * @return gradient trellis (fSize x sSize)
	 * @throws RuntimeException if the likelihood ratios are NaN or infinite
	 */
	public Trellis getGradient(){
		Trellis gradient = new Trellis(fSize,sSize);
		gradient.clear(0);
		double norm = (forwardNewLikelihood/forwardOriginalLikelihood)
				+ (backwardNewLikelihood/backwardOriginalLikelihood);
		
		if(Double.isNaN(norm) || Double.isInfinite(norm)){
			System.out.println("Gradient norm is nan");
			System.out.println("fnl" + forwardNewLikelihood + " bnl" + backwardNewLikelihood);
			System.out.println("objective" + getObjective());
			throw new RuntimeException();
		}
		for(int si = 0; si < sSize; si++){
			for(int fi = 0; fi < fSize; fi++){
				double value = forward._statePosteriors.getProb(fi, si)*(forwardNewLikelihood/forwardOriginalLikelihood)
						- backward._statePosteriors.getProb(si, fi)*(backwardNewLikelihood/backwardOriginalLikelihood);
				value = value/norm;
				// Sub-gradient of the slack term -slack*||lambda||;
				// only defined when lambda is non-zero.
				if(lambdaNorm != 0){
					value -= slack*lambda.getProb(fi,si)/lambdaNorm;
				}
				gradient.setProb(fi, si, value);
			}
		}	
		return gradient;
	}
	
	
	/**
	 * Objective being minimized:
	 *   -log( (fNew/fOrig + bNew/bOrig) / 2 ) - slack * ||lambda||
	 * i.e. the negative log of the average likelihood ratio of the two
	 * models, penalized by the slack term.
	 * @return current objective value
	 */
	public double getObjective(){
		double objective = -Math.log(((forwardNewLikelihood/forwardOriginalLikelihood)
				+ (backwardNewLikelihood/backwardOriginalLikelihood))/2);
		objective -= slack*lambdaNorm;
		return objective;
	}
	
	
}
