package edu.gatech.cc.liam.marl.decpomdp;

import java.util.HashSet;
import java.util.Map.Entry;

import edu.gatech.cc.liam.core.Globals;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecMDP;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecMDPSingleStagePolicy;
import edu.gatech.cc.liam.geometry.linear.NPoint;
import edu.gatech.cc.liam.geometry.linear.SparsePoint;
import edu.gatech.cc.liam.geometry.linear.Vector;

/**
 * One linear value vector ("alpha vector") of a Dec-MDP value function.
 * {@code jTVals} stores, per joint type, the expected value of executing
 * {@link #nextActions} for one stage and then following this vector's
 * successor policy; {@code typeActionValues} caches the one-step Bellman
 * backup of each (joint type, joint action) pair derived from {@code jTVals}.
 */
public class ValueVector {
	// NOTE(review): this class does not implement Serializable, so this field is
	// unused — confirm nothing relies on it (e.g. a Serializable subclass, which
	// would need its own id anyway) and remove.
	private static final long serialVersionUID = 1L;
	// per-joint-type values achieved by the best joint-action policy
	private NPoint jTVals;
	/** Single-stage joint policy whose execution this vector's values assume. */
	public DecMDPSingleStagePolicy nextActions;
	public DecMDP baseModel;
	/** Beliefs at which this vector is (believed to be) the maximizing vector. */
	public HashSet<Belief> supportedBs;
	public HashSet<Belief> hasSeen;
	
	// Cached backup values, one per (jT, jA) pair, indexed by
	// baseModel.getVarIndex(jT, jA); rebuilt by makeTypeActionValues().
	private double[] typeActionValues;

	/**
	 * Base constructor: records the policy/model and registers {@code targetB}
	 * as a supported belief. Deliberately does NOT set {@code jTVals}; every
	 * caller of this private constructor is responsible for initializing it.
	 */
	private ValueVector(DecMDPSingleStagePolicy nextActions, Belief targetB, DecMDP baseModel) {
		this.nextActions = nextActions;
		this.baseModel = baseModel;
		supportedBs = new HashSet<Belief>();
		supportedBs.add(targetB);
		hasSeen = new HashSet<Belief>();
	}

	/** Constructs from an explicit per-joint-type value point {@code v}. */
	private ValueVector(NPoint v, DecMDPSingleStagePolicy nextActions, Belief targetB, DecMDP baseModel) {
		this(nextActions, targetB, baseModel);
		jTVals = v;
		makeTypeActionValues();
	}

	/** Uniform vector assigning {@code initialValue} to every joint type. */
	public ValueVector(double initialValue, Belief targetB, DecMDP baseModel) {
		this(new NPoint(targetB.dimensions(), initialValue), null, targetB, baseModel);
		assert targetB.dimensions() == baseModel.numJTypes;
	}

	/**
	 * Backup constructor: builds the vector obtained by executing
	 * {@code nextActions} now and receiving {@code successorVV}'s cached
	 * type-action values afterwards.
	 *
	 * @param nextActions single-stage joint policy executed at this stage
	 * @param successorVV value vector supporting the successor belief
	 * @param b           the belief this new vector is constructed to support
	 * @param fv          value function (only referenced by the commented-out
	 *                    sanity assertion below)
	 */
	public ValueVector (DecMDPSingleStagePolicy nextActions, ValueVector successorVV, Belief b,
			DMDPValueFunction fv) {
		this(nextActions, b, successorVV.baseModel);
//		assert(fv.findVVAtBelief(baseModel.getSuccessorBelief(b,nextActions)) == successorVV);
		// Renamed from "typeActionValues" so it no longer shadows this class's
		// field of the same name: these are the SUCCESSOR's backup values.
		double[] successorTAV = successorVV.getTypeActionValues();
		jTVals = new NPoint(baseModel.numJTypes);
		for(int jT=0; jT < baseModel.numJTypes; jT++) {
			for(int jA=0; jA < baseModel.numJActions; jA++) {
				int varIndex = baseModel.getVarIndex(jT, jA);
				assert nextActions.getProb(varIndex) <= 1.0 + Globals.TOLERANCE;
				// expectation over the policy's action distribution for type jT
				jTVals.add(jT, successorTAV[varIndex] * nextActions.getProb(varIndex));
			}
		}
		makeTypeActionValues();
	}
	
	/** Value of this vector at belief {@code b}: dot product with b's distribution. */
	public double getValue(Belief b) {
		return jTVals.dotProduct(b.getProbDist());
	}
	
	/**
	 * Builds an LP objective over the "no-nature" (jT, jA) variables: each
	 * coefficient accumulates the cached type-action value weighted by
	 * {@code b}'s probability mass on that joint type.
	 */
	public double[] makeObjective(Belief b) {
		assert(b.dimensions() == baseModel.numJTypes);
		// NOTE(review): array is sized by the noNature sub-model while the indices
		// written below come from the full model mapped through
		// getNoNatureIndex(); assumed consistent — confirm in DecMDP.
		int numVars = baseModel.noNature.numJTypes *  baseModel.noNature.numJActions;
		double[] objective  = new double[numVars];
		// iterate only joint types carrying nonzero belief mass (sparse dist)
		for(Entry<Integer, Double> e : b.getProbDist()) {
			int jT = e.getKey();
			for(int jA=0; jA < baseModel.numJActions; jA++) {					  
				int fullIndex = baseModel.getVarIndex(jT, jA);
				int noNatureIndex = baseModel.getNoNatureIndex(fullIndex);
				objective[noNatureIndex] += typeActionValues[fullIndex] * e.getValue();
			}
		}
		return objective;
	}
	
	/**
	 * Value this vector yields at {@code b} when the single-stage policy of
	 * {@code b}'s supporting vector is executed: sums type-action values
	 * weighted by both belief mass and the policy's action probabilities.
	 */
	public double getValueIfSuccessor(Belief b) {
		assert(b.dimensions() == baseModel.numJTypes);
		DecMDPSingleStagePolicy jaction = b.getSupport().nextActions;
		double value = 0;
		for(Entry<Integer, Double> e : b.getProbDist()) {
			int jT = e.getKey();
			for(int jA=0; jA < baseModel.numJActions; jA++) {					  
				int varIndex = baseModel.getVarIndex(jT, jA);
				value += typeActionValues[varIndex] * e.getValue() * jaction.getProb(varIndex);
			}
		}
		return value;
	}
	
	
	/**
	 * Returns the cached backup values (no copy — callers must not mutate).
	 *
	 * @return an array whose position [i] is the value of the joint
	 *         type/action variable i, indexed by
	 *         {@code baseModel.getVarIndex(jT, jA)}
	 */
	public double[] getTypeActionValues() {
		return typeActionValues;
	}

	/** Rebuilds the cache: reward plus discounted expected {@code jTVals}. */
	private void makeTypeActionValues() {
		int numVars = baseModel.numJTypes *  baseModel.numJActions;
		double[] tav  = new double[numVars];
		for(int jT=0; jT < baseModel.numJTypes; jT++) {
			for(int jA=0; jA < baseModel.numJActions; jA++) {
				Vector transProbs = baseModel.getTransitionProbs(jT, jA);
				// one-step Bellman backup: R(jT,jA) + discount * E[jTVals | transition]
				double varValue = (this.jTVals.dotProduct(transProbs) * baseModel.discount)
				                   + baseModel.getReward(jT, jA);								  
				int varIndex = baseModel.getVarIndex(jT, jA);
				tav[varIndex] = varValue;
			}
		}
		typeActionValues = tav;
	}
	
	/** Tolerance-based equality on the underlying per-type value points. */
	public boolean softEquals(ValueVector v) {
		return this.jTVals.softEquals(v.jTVals);
	}
	
	@Override
	public int hashCode() {
		final int prime = 31;
		int result = prime + jTVals.hashCode();
		return result;
	}

	/** Equality (consistent with hashCode) is defined solely by {@code jTVals}. */
	@Override
	public boolean equals(Object obj) {
		if (this == obj)
			return true;
		if (obj == null)
			return false;
		if (getClass() != obj.getClass())
			return false;
		return this.jTVals.equals(((ValueVector)obj).jTVals);
	}
	
	@Override
	public String toString() {
		return super.toString() + "\naction: " + 
			   ((baseModel!=null)&&(nextActions!=null) ? nextActions.getActionString():"");
	}
}
