package solvers.vi;

import globals.CentralStatics;
import globals.Tuple;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Random;

import problems.CachedTO;
import problems.DiscreteActions;
import problems.DiscreteObservations;
import problems.EnumerableStates;
import problems.Mopomdp;

import value.AlphaMatrix;
import value.CPruner;
import value.LazyScalarizedVector;
import value.ParetoPruner;
import value.SimpleVector;

/**
 * Value-iteration solvers for (multi-objective) partially observable MDPs.
 *
 * The value function is represented as a set of alpha elements of type
 * {@code V} (vectors or matrices, possibly lazily scalarized). Two solution
 * methods are provided:
 * <ul>
 *   <li>{@link #valueIteration} — exact value iteration using full backups
 *       with incremental pruning;</li>
 *   <li>{@link #perseus} — randomized point-based value iteration (Perseus)
 *       over a sampled belief set.</li>
 * </ul>
 *
 * @param <V> alpha element type (vector/matrix over states and objectives)
 * @param <S> state identifier type
 * @param <A> action identifier type
 * @param <O> observation identifier type
 */
public class ValueIteratorPO<V extends SimpleVector<A>, S, A, O>{
	
	/** Current set of alpha elements representing the value function. */
	public ArrayList<V> alphas;
	private CPruner<V> cpruner;
	private V arche; // prototype element used to create zero/copied vectors (workaround for generic instantiation)
	
	private ArrayList<V> r_aVectors; //The immediate (possibly MO) rewards per action, for each state
	private Mopomdp<S,A,O> m;
	
	private CachedTO<S,A,O> cachedTO; // cached joint transition-observation probabilities
	private ArrayList< ArrayList<ArrayList<V>> > cacheAlphaAO; // cached back-projected alpha sets, indexed [action][observation]
	
	private static boolean NO_SLACKING = false; // turns off the laziness (forces eager process() calls)
	
	private static boolean DEBUG = false;
	private static boolean ROUTPUT = false;        // emit R-format vector dumps after each backup
	private static boolean ROUTPUT_FINAL = false;  // emit R-format vector dump when Perseus finishes
	private static boolean IN_BETWEEN_OUTPUT = false;
	private static int COUNTER = 0;
	private static double PRECISE = 0.000001; // tolerance below which an improvement counts as zero
	
	/**
	 * Creates a solver for the given problem.
	 *
	 * @param problem        the (multi-objective) POMDP to solve
	 * @param initialVectors initial value function; must be non-empty, its
	 *                       first element serves as prototype for new vectors
	 */
	public ValueIteratorPO(Mopomdp<S,A,O> problem, ArrayList<V> initialVectors){
		this.alphas = initialVectors;
		this.arche = alphas.get(0);
		this.cpruner = new CPruner<V>();
		this.m = problem;
		this.cachedTO = null;
	}
	
	/** The following method calculates a full backup of the alpha vectors. 
	 *  It applies incremental pruning (calling CPrune between all cross-sums
	 *  between V^{a,o}, and after the union over actions)
	 *  If the calcImprovement option is used, it calculates and returns the 
	 *  maximal improvement (across all beliefs) of applying this backup. 
	 *  If this option is not used the method returns a 0. 
	 */
	@SuppressWarnings("unchecked")
	public double fullBackup(DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet, 
			boolean calcImprovement){
		CachedTO<S,A,O> cache = this.getCacheTO(aSet, oSet, sSet);

		double maxImprovement = 0;
		this.initRewardMatrices(aSet);
		
		ArrayList<V> vPrime = new ArrayList<V>(alphas.size()*2);
		for(int a=0; a<aSet.size(); a++){
			ArrayList<V> va = new ArrayList<V>(alphas.size()*2);
			for(int o=0; o<oSet.size(); o++){
				// Back-project every current alpha through the joint
				// transition-observation probabilities for (a,o).
				ArrayList<V> vao = new ArrayList<V>(alphas.size()*2);
				for(int alf=0; alf<this.alphas.size(); alf++){
					V alpha = this.alphas.get(alf);
					double[][] probMat = new double[sSet.numberOfStates()][sSet.numberOfStates()];
					for(int s=0; s<sSet.numberOfStates(); s++){
						for(int s_=0; s_<sSet.numberOfStates(); s_++){
							probMat[s][s_] = cache.getTO(s, a, o, s_);
						}
					}
					V newAlpha = (V) alpha.linearTransformation(probMat);
					newAlpha.tag(aSet.actionIdentifier(a));
					vao.add(newAlpha);
				}
				// Incremental pruning: prune each V^{a,o} and after every cross-sum.
				vao = cpruner.prune(vao);
				va = this.crosssum(va, vao);
				if(NO_SLACKING){
					for(int cnt=0; cnt<va.size(); cnt++){
						va.get(cnt).process();
					}
				}
				va = cpruner.prune(va);
			}
			if(m.isDiscounted()){
				for(int cnt=0; cnt<va.size(); cnt++){
					V vector = va.get(cnt);
					vector.multiplyByScalar(m.getGamma());
				}
			}
			for(int i=0; i<va.size(); i++){
				va.get(i).process();
			}
			// Add the immediate (possibly multi-objective) reward for this action.
			ArrayList<V> singleton = singleton(this.r_aVectors.get(a));
			va = this.crosssum(singleton, va);
			for(int cnt=0; cnt<va.size(); cnt++){
				va.get(cnt).tag(aSet.actionIdentifier(a));
			}
			vPrime.addAll(va);
		}
		vPrime = cpruner.prune(vPrime);
		for(int i=0; i<vPrime.size(); i++){
			vPrime.get(i).process();//Lazy evaluation for scalarized vectors
		}
		if(calcImprovement){
			for(int i=0; i<vPrime.size(); i++){
				// imp.x is the maximal gain of this new vector over the old
				// value function; imp.y the witness weight/belief (unused here).
				Tuple<Double, double[]> imp = cpruner.findImprovementAndWeight(vPrime.get(i), this.alphas);
				if(imp.x > maxImprovement){
					maxImprovement = imp.x;
				}
			}
		}
		this.alphas = vPrime;
		return maxImprovement;
	}
	
	/**
	 * Runs exact value iteration (repeated {@link #fullBackup}) until the
	 * maximal improvement drops to {@code stopCrit} or below.
	 *
	 * @param stopCrit convergence threshold on the Bellman improvement
	 * @return total wall-clock time (ms) spent inside backups
	 */
	public long valueIteration(DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet, double stopCrit){
		
		if(this.alphas==null){
			System.err.println("Alphas not initialized.");
			System.exit(-42);
		}
		long totalTimeUsed = 0;
		
		double imp = Double.MAX_VALUE; 
		int cnt = 0;
		while(imp > stopCrit){
			long time = System.currentTimeMillis();
			imp = this.fullBackup(aSet, oSet, sSet, true);
			long timeUsed = System.currentTimeMillis() - time;
			totalTimeUsed += timeUsed;
			if(IN_BETWEEN_OUTPUT){
				System.out.println(++cnt +"-th iteration. \n Improvement: "+imp);
				System.out.println(" Runtime of backup: "+timeUsed);
				System.out.println(" Number of vectors: "+this.alphas.size());
			}
			if(ROUTPUT)
				System.out.println(CentralStatics.vectorsTo2DROutput(alphas, "v"+cnt));
		}
		return totalTimeUsed;
	}
	
	/**
	 * Performs a point-based backup at a single belief point: for every action
	 * it picks, per observation, the back-projected alpha maximizing the
	 * scalarized value at the belief, and keeps the best action overall.
	 *
	 * @param belief belief point for which to backup
	 * @return new value at the belief point, and the new alpha vector/matrix
	 *         (tagged with the maximizing action)
	 */
	@SuppressWarnings("unchecked")
	public Tuple<Double,V> pointBasedBackup(double[] belief, DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet){
		
		this.initRewardMatrices(aSet);
		ArrayList< ArrayList<ArrayList<V>> > aoCache = this.getAlphaAOCache(aSet, oSet, sSet);
		V newVec = (V) arche.zeroVector();
		double newVal = Double.NEGATIVE_INFINITY;
		for(int a=0; a<aSet.size(); a++){
			V newVecA = (V) arche.zeroVector();
			double newValA = 0;
			for(int o=0; o<oSet.size(); o++){
				int max = -1;
				double maxScalVal = Double.NEGATIVE_INFINITY;
				ArrayList<V> aoVectors = aoCache.get(a).get(o);
				for(int i=0; i<aoVectors.size(); i++){
					V vec = aoVectors.get(i);
					double val = vec.linearScalValue(belief); 
					if(val>maxScalVal){
						max = i;
						maxScalVal = val;
					}
				}
				// Guard against an empty (or all-NaN/-inf) pruned a,o-set:
				// previously this indexed get(-1) and crashed.
				if(max >= 0){
					newVecA = (V) newVecA.add(aoVectors.get(max));
					if(NO_SLACKING){
						newVecA.process();
					}
					newValA += maxScalVal;
				}
			}
			if(m.isDiscounted()){
				newVecA.multiplyByScalar(m.getGamma());
				newValA *= m.getGamma();
			}
			newVecA = (V) newVecA.add(this.r_aVectors.get(a));
			newValA += this.r_aVectors.get(a).linearScalValue(belief);
			
			if(newValA>newVal){
				newVec = newVecA;
				newVal = newValA;
				newVec.tag(aSet.actionIdentifier(a));
			}
		}
		newVec.process();		
		return new Tuple<Double,V>(newVal,newVec);
	}
	
	/** Invalidates the cached back-projected alpha sets (call after alphas change). */
	public void resetAlphaAOCache(){
		this.cacheAlphaAO = null;
	}
	
	/**
	 * Returns (building and caching on first use) the pruned, back-projected
	 * alpha sets V^{a,o}, indexed [action][observation].
	 *
	 * @param aSet the action set
	 * @param oSet the observation set
	 * @param sSet the state set
	 * @return per (action, observation) the pruned back-projections of {@link #alphas}
	 */
	@SuppressWarnings("unchecked")
	public  ArrayList< ArrayList<ArrayList<V>> > getAlphaAOCache(DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet){
		
		if(this.cacheAlphaAO != null){
			return this.cacheAlphaAO;
		}
		
		this.cacheAlphaAO = new ArrayList< ArrayList<ArrayList<V>> >();
		
		CachedTO<S,A,O> cache = this.getCacheTO(aSet, oSet, sSet);
		for(int a=0; a<aSet.size(); a++){
			ArrayList<ArrayList<V>> va = new ArrayList<ArrayList<V>>();
			for(int o=0; o<oSet.size(); o++){
				ArrayList<V> vao = new ArrayList<V>(alphas.size()*2);
				for(int alf=0; alf<this.alphas.size(); alf++){
					V alpha = this.alphas.get(alf);
					double[][] probMat = new double[sSet.numberOfStates()][sSet.numberOfStates()];
					for(int s=0; s<sSet.numberOfStates(); s++){
						for(int s_=0; s_<sSet.numberOfStates(); s_++){
							probMat[s][s_] = cache.getTO(s, a, o, s_);
						}
					}
					V newAlpha = (V) alpha.linearTransformation(probMat);
					newAlpha.tag(aSet.actionIdentifier(a));
					vao.add(newAlpha);
				}
				vao = cpruner.prune(vao);
				va.add(vao);
			}
			this.cacheAlphaAO.add(va);
		}
		return this.cacheAlphaAO;
	}
	
	/**
	 * Lazily initializes {@link #r_aVectors}: one immediate-reward element
	 * per action, converted to type {@code V} via the prototype.
	 */
	public void initRewardMatrices(DiscreteActions<A,S> aSet){
		if(this.r_aVectors == null){
			ArrayList<AlphaMatrix<A>> matties = rewardMatrices(aSet);
			this.r_aVectors = new ArrayList<V>(aSet.size());
			try{
				for(int i=0; i<matties.size(); i++){
					@SuppressWarnings("unchecked")
					V nw = ((V) arche.copyMatrix(matties.get(i)));
					r_aVectors.add(nw); 
				}
			} catch (Exception e){
				// Best-effort: report and continue with whatever was converted.
				System.err.println(e.toString());
			}
		}
	}
	
	/**
	 * Builds lazily-scalarized reward vectors for every action under the
	 * given scalarization weights.
	 *
	 * @param wvec scalarization weight vector over objectives
	 */
	public ArrayList<LazyScalarizedVector<A>> rewardVectors(DiscreteActions<A,S> aSet, double[] wvec){
		LazyScalarizedVector.setCurrentScalarizationWeights(wvec);
		ArrayList<LazyScalarizedVector<A>> result = new ArrayList<LazyScalarizedVector<A>>(aSet.size());
		for(int a=0; a<aSet.size(); a++){
			A action = aSet.actionIdentifier(a);
			result.add(new LazyScalarizedVector<A>(this.rewardMatrix(action), wvec));
		}
		return result;
	}
	
	/** Builds the immediate-reward matrix for every action in the set. */
	public ArrayList<AlphaMatrix<A>> rewardMatrices(DiscreteActions<A,S> aSet){
		ArrayList<AlphaMatrix<A>> result = new ArrayList<AlphaMatrix<A>>(aSet.size());
		for(int a=0; a<aSet.size(); a++){
			A action = aSet.actionIdentifier(a);
			result.add(this.rewardMatrix(action));
		}
		return result;
	}
	
	/**
	 * Builds the (states x objectives) immediate-reward matrix for one action,
	 * tagged with that action.
	 */
	public AlphaMatrix<A> rewardMatrix(A action){
		AlphaMatrix<A> result  = new AlphaMatrix<A>(m.numberOfStates(),m.numberOfObjectives());
		for(int s=0; s<m.numberOfStates(); s++){
			double[] rew = m.reward(s, action);
			for(int o=0; o<m.numberOfObjectives(); o++){
				result.setValue(s, o, rew[o]);
			}
		}
		result.tag(action);
		return result;
	}
	
	/** Renders the current value function as an XML-ish human-readable string. */
	public String valuesToString(){
		String[] states = m.getStateSet().stateList();
		StringBuilder result = new StringBuilder("<"+this.alphas.size()+" value vectors>\n");
		for(int i=0; i<this.alphas.size(); i++){
			result.append(this.alphas.get(i).toString(states,m.getActionSet()));
			result.append("\n");
		}
	    result.append("</"+this.alphas.size()+" value vectors>\n");
		return result.toString();
	}
	
	/** Replaces the current value function by the given lower bound. */
	public void setLowerBoundAsValue(ArrayList<V> vs){
		this.alphas = vs;
	}
	
	// Lazily builds the transition-observation cache for the problem.
	private CachedTO<S,A,O> getCacheTO(DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet){
		if(this.cachedTO==null){
			this.cachedTO = new CachedTO<S,A,O>(sSet, aSet, oSet, m);
		}
		return this.cachedTO;
	}
	
	// Cross-sum of two vector sets: {u + v | u in l1, v in l2}.
	// An empty operand acts as the identity (the other set is returned as-is).
	@SuppressWarnings("unchecked")
	private ArrayList<V> crosssum(ArrayList<V> l1, ArrayList<V> l2){
		if(l1.size()==0){
			return l2;
		} 
		if(l2.size()==0){
			return l1;
		}
		ArrayList<V> result = new ArrayList<V>(l1.size()*l2.size());
		for(int i=0; i<l1.size(); i++){
			V elem1 = l1.get(i);
			for(int j=0; j<l2.size(); j++){
				V elem2 = l2.get(j);
				V newElem = (V) elem1.add((V) elem2);
				result.add(newElem);
			}
		}
		return result;
	}
	
	/** Wraps a single vector in a one-element list. */
	public ArrayList<V> singleton(V v1){
		ArrayList<V> result = new ArrayList<V>(1);
		result.add(v1);
		return result;
	}
	
	/**
	 * One Perseus iteration: backs up randomly chosen belief points, skipping
	 * points already improved by this iteration's new vectors, and replaces
	 * {@link #alphas} by the newly built set.
	 *
	 * @return the maximal improvement observed across the backed-up beliefs
	 */
	public double perseusBackup(ArrayList<double[]> beliefPoints, DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet){
		double result = 0;
		Random r = CentralStatics.getCentralRandom();
		ArrayList<double[]> bSenorina = new ArrayList<double[]>(beliefPoints);
		ArrayList<V> newVecs = new ArrayList<V>();
		while(!bSenorina.isEmpty()){
			// Pick a remaining belief uniformly at random (Perseus sampling).
			int elem = r.nextInt(bSenorina.size());
			double[] b = bSenorina.get(elem);
			bSenorina.remove(elem);
			if(!this.hasImproved(newVecs, b, PRECISE)){
				Tuple<Double,V> update = this.pointBasedBackup(b, aSet, oSet, sSet);
				Tuple<Double,V> old = maxValue(this.alphas, b);
						
				double improvement = update.y.linearScalValue(b) -  old.x;
				if(DEBUG){
					System.out.println("Found: "+update.y.toString(sSet.stateList(), aSet));
					System.out.println("Old value: " + maxValue(this.alphas, b));
					System.out.println("New value: " + maxValue(newVecs, b));
					System.out.println("New value': " + update.x);
					System.out.println("New value'': " + update.y.linearScalValue(b));
					System.out.println("Improvement: " + improvement);
				}
				if(improvement > PRECISE){
					newVecs.add(update.y);
					if(improvement > result){
						result = improvement;
					}	
				} else {
					// No improvement: keep the old maximizing vector so the
					// value at this belief never decreases.
					newVecs.add(old.y);
				}
			}
		}
		if(ROUTPUT){
			System.out.println(CentralStatics.vectorsTo2DROutput(alphas, "before"+(++COUNTER)));
			System.out.println(CentralStatics.vectorsTo2DROutput(newVecs, "new"+(COUNTER)));
		}
		// Note: newVecs may contain dominated vectors; pruning here (CPruner or
		// ParetoPruner) was tried but plain replacement was kept.
		this.alphas = newVecs;
		if(ROUTPUT){
			System.out.println(CentralStatics.vectorsTo2DROutput(alphas, "after"+(COUNTER)));
		}
		this.resetAlphaAOCache();
		return result;
	}
	
	/**
	 * Runs Perseus on {@code nSamples} beliefs sampled by random exploration.
	 *
	 * @return total wall-clock time (ms) spent inside backups
	 */
	public long perseus(int nSamples, DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet, double stopCrit, int maxIterations){
		
		if(this.alphas==null){
			System.err.println("Alphas not initialized.");
			System.exit(-42);
		}
		
		BeliefSampler<S,A,O> bs = new BeliefSampler<S,A,O>(this.m);
		if(IN_BETWEEN_OUTPUT) System.out.println("Sampling...");
		ArrayList<double[]> bSampleSet = bs.randomExplorationPerseus(nSamples);
		return this.perseus(bSampleSet, aSet, oSet, sSet, stopCrit, maxIterations);
	}
	
	/**
	 * Runs Perseus on the given belief set until the improvement drops to
	 * {@code stopCrit} or {@code maxIterations} iterations have been done.
	 *
	 * @return total wall-clock time (ms) spent inside backups
	 */
	public long perseus(ArrayList<double[]> bSamples, DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet, double stopCrit, int maxIterations){
		
		if(this.alphas==null){
			System.err.println("Alphas not initialized.");
			System.exit(-42);
		}
		long totalTimeUsed = 0;
		
		double imp = Double.MAX_VALUE; 
		if(IN_BETWEEN_OUTPUT) System.out.println("Welcome to Perseus.");
		ArrayList<double[]> bSampleSet = bSamples;
		if(IN_BETWEEN_OUTPUT) {
			System.out.println("\nStarting iterations...");
			System.out.println("Problem "+m.toString()+" discount:"+m.getGamma());
			System.out.println(this.alphas.get(0).toString(sSet.stateList(), aSet));
			System.out.println(CentralStatics.vectorsTo2DROutput(alphas, "start"));
		}
		int cnt = 0;
		while(imp > stopCrit && cnt<maxIterations){
			long time = System.currentTimeMillis();
			imp = this.perseusBackup(bSampleSet, aSet, oSet, sSet);
			long timeUsed = System.currentTimeMillis() - time;
			totalTimeUsed+=timeUsed;
			if(IN_BETWEEN_OUTPUT){
				System.out.println(++cnt +"-th iteration. \n Improvement: "+imp);
				System.out.println(" Runtime of backup: "+timeUsed);
				System.out.println(" Number of vectors: "+this.alphas.size());
			}
		}
		if(ROUTPUT_FINAL){
			System.out.println(CentralStatics.vectorsTo2DROutput(this.alphas, "perseus"));
		}
		return totalTimeUsed;
	}
	
	// True iff the new vector set already improves the value at belief b by
	// more than the given precision (an empty set never improves).
	private boolean hasImproved(ArrayList<V> newVecs, double[] b, double precision){
		double oldVal = maxValue(this.alphas, b).x;
		double newVal = maxValue(newVecs, b).x;
		if(newVal == Double.NEGATIVE_INFINITY){
			return false;
		}
		if(DEBUG){
			System.out.println("old:"+oldVal+" new:"+newVal);
		}
		return ( (newVal-oldVal) > precision );
	}
	
	// Returns the maximal scalarized value at belief b and its maximizing
	// vector. On an empty set the value is -infinity and the vector null.
	private Tuple<Double, V> maxValue(ArrayList<V> as, double[] b){
		V resultY = null;
		double resultX = Double.NEGATIVE_INFINITY; 
		for(int i=0; i<as.size(); i++){
			V vec = as.get(i);
			double val = vec.linearScalValue(b);
			if(val>resultX){
				resultX = val;
				resultY = vec;
			}
		}
		return new Tuple<Double, V>(resultX, resultY);
	}
	
	/** Initializes the value function with the problem's misere lower bound. */
	@SuppressWarnings("unchecked")
	public void initializeMisereLowerBound(){
		V newVec = (V) arche.copyMatrix(m.misereMatrix());
		this.alphas = this.singleton(newVec);
	}
	
	/** Dumps the cached back-projected alpha sets per (action, observation) to stdout. */
	public void aoCacheToOut(DiscreteActions<A,S> aSet, 
			DiscreteObservations<O> oSet, EnumerableStates<S> sSet){
		
		 ArrayList< ArrayList<ArrayList<V>> > cache = getAlphaAOCache(aSet, oSet, sSet);
		 for(int a = 0; a<aSet.size(); a++){
			 System.out.println("-----"+ "Action "+a+"-----");
			 for(int o = 0; o<oSet.size(); o++){
				 System.out.println("---"+ "Obs "+o+"---");
				 ArrayList<V> ao = cache.get(a).get(o);
				 for(int x = 0; x<ao.size(); x++){
					 System.out.println(ao.get(x).toString(sSet.stateList(), aSet));
				 }
			 }
		 }
	}
	
}
