package edu.gatech.cc.liam.marl.decpomdp;

import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map.Entry;

import edu.gatech.cc.liam.core.Globals;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecMDP;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecMDPSingleStagePolicy;

/**
 * A piecewise-linear value function for a {@link DecMDP}, represented as a
 * set of supports ({@link ValueVector}s, i.e. alpha-vectors). The value at a
 * belief is the maximum over all supports; each support carries the
 * single-stage joint policy that achieves it.
 */
public class DMDPValueFunction implements Iterable<ValueVector> {
	// Supports defining the value function; value(b) = max over these.
	private final LinkedList<ValueVector> valueFunction;
	// The decentralized MDP model this value function is defined over.
	private final DecMDP dmdp;

	/** Creates an empty value function over {@code dmdp}. */
	public DMDPValueFunction(DecMDP dmdp) {
		valueFunction = new LinkedList<ValueVector>();
		this.dmdp = dmdp;
	}

	/** Creates a value function over {@code dmdp} seeded with {@code initialVV}. */
	public DMDPValueFunction(DecMDP dmdp, Collection<ValueVector> initialVV) {
		valueFunction = new LinkedList<ValueVector>(initialVV);
		this.dmdp = dmdp;
	}

	/**
	 * Copy constructor. The support list is shallow-copied: the
	 * {@link ValueVector} instances themselves are shared with {@code toClone}.
	 */
	public DMDPValueFunction(DMDPValueFunction toClone) {
		valueFunction = new LinkedList<ValueVector>(toClone.valueFunction);
		dmdp = toClone.dmdp;
	}

	/** @return the number of supports currently in the value function */
	public int getNumSupports() {
		return valueFunction.size();
	}

	/**
	 * Adds {@code v} as a support, unless an approximately equal vector
	 * (per {@code ValueVector.softEquals}) is already present.
	 */
	public void addSupport(ValueVector v) {
		if (!hasVector(v)) {
			valueFunction.add(v);
		}
	}

	/** @return true iff some existing support soft-equals {@code pv} */
	private boolean hasVector(ValueVector pv) {
		for (ValueVector pv2 : valueFunction) {
			if (pv.softEquals(pv2))
				return true;
		}
		return false;
	}

	/**
	 * Returns the single-stage joint policy of the support that maximizes
	 * value at belief {@code b}.
	 *
	 * @return the best support's policy, or {@code null} when the value
	 *         function has no supports
	 */
	public DecMDPSingleStagePolicy getActions(Belief b) {
		double valueAtBelief = Double.NEGATIVE_INFINITY;
		DecMDPSingleStagePolicy bestAction = null;
		for (ValueVector v : valueFunction) {
			double value = v.getValue(b);
			if (value > valueAtBelief) {
				valueAtBelief = value;
				bestAction = v.nextActions;
			}
		}
		return bestAction;
	}

	/**
	 * Re-associates every belief in {@code bs} with its best support and
	 * refreshes successor values, then prunes supports that no belief
	 * selected (they are dominated over {@code bs}).
	 */
	public void updateAllBeliefs(Collection<Belief> bs) {
		for (ValueVector v : valueFunction)
			v.supportedBs.clear();
		for (Belief b : bs) {
			b.updateToBest(valueFunction);
			b.getSupport().supportedBs.add(b);
			b.updateSuccValues(valueFunction);
		}
		// Single-pass prune via Iterator.remove: avoids the O(n*m)
		// collect-then-removeAll scan the previous version performed.
		for (Iterator<ValueVector> it = valueFunction.iterator(); it.hasNext(); ) {
			if (it.next().supportedBs.isEmpty())
				it.remove();
		}
	}

	/**
	 * @return the maximum value over all supports at belief {@code b};
	 *         {@code Double.NEGATIVE_INFINITY} if there are no supports
	 */
	public double findValueAtBelief(Belief b) {
		double valueAtBelief = Double.NEGATIVE_INFINITY;
		for (ValueVector v : valueFunction) {
			double value = v.getValue(b);
			valueAtBelief = Math.max(valueAtBelief, value);
		}
		return valueAtBelief;
	}

	/**
	 * @return the support maximizing value at belief {@code b}, or
	 *         {@code null} when the value function has no supports
	 */
	public ValueVector findVVAtBelief(Belief b) {
		double valueAtBelief = Double.NEGATIVE_INFINITY;
		ValueVector bVV = null;
		for (ValueVector v : valueFunction) {
			double value = v.getValue(b);
			if (value > valueAtBelief) {
				valueAtBelief = value;
				bVV = v;
			}
		}
		return bVV;
	}

	/**
	 * Simulates the greedy policy from {@code startB} to an effective horizon
	 * of {@code depth} stages, by running until the discount factor decays
	 * below {@code discount^depth}.
	 * NOTE(review): with {@code dmdp.discount == 1.0} the threshold is 1.0 and
	 * the simulation runs zero stages — this assumes discount &lt; 1; confirm.
	 */
	public double simulateBestPolicy(Belief startB, int depth) {
		return simulateBestPolicy(startB, Math.pow(dmdp.discount, depth));
	}

	/**
	 * Greedily follows this value function from {@code startB}, accumulating
	 * discounted expected immediate reward until the per-stage discount
	 * factor falls to {@code epsilon}.
	 *
	 * @return the accumulated discounted reward of the simulated trajectory
	 */
	public double simulateBestPolicy(Belief startB, double epsilon) {
		double accumulatedReward = 0.0;
		Belief curB = startB.clone(); // don't mutate the caller's belief
		// dF = discount^t, the weight of stage t's reward.
		for (double dF = 1.0; dF > epsilon; dF *= dmdp.discount) {
			DecMDPSingleStagePolicy nextAction = this.getActions(curB);
			Belief successorB = dmdp.getSuccessorBelief(curB, nextAction);
			double expectedReward = getExpectedReward(curB, nextAction);
			accumulatedReward += expectedReward * dF;
			curB = successorB;
		}
		return accumulatedReward;
	}

	/**
	 * Asserts the one-step Bellman-consistency ("pessimism") invariant at
	 * {@code b}: immediate reward plus discounted successor value must not
	 * fall below the stored value, within {@code Globals.TOLERANCE}.
	 * Always returns {@code true} so it can be used inside {@code assert}
	 * statements; the check only fires when assertions are enabled.
	 */
	public boolean isBeliefValuePessimistic(Belief b) {
		DecMDPSingleStagePolicy nextAction = this.getActions(b);
		double expectedReward = getExpectedReward(b, nextAction);
		Belief successorB = dmdp.getSuccessorBelief(b, nextAction);
		assert(expectedReward + (dmdp.discount * this.findValueAtBelief(successorB))
				 >= this.findValueAtBelief(b) - Globals.TOLERANCE);
		return true;
	}

	/**
	 * Expected immediate reward of following {@code nextAction} at belief
	 * {@code b}: sum over joint types jT and joint actions jA of
	 * R(jT, jA) * P(jA | jT) * b(jT).
	 */
	private double getExpectedReward(Belief b, DecMDPSingleStagePolicy nextAction) {
		double expectedReward = 0.0;
		for (Entry<Integer, Double> e : b.getProbDist()) {
			int jT = e.getKey();
			double beliefProb = e.getValue(); // hoisted: invariant across jA
			for (int jA = 0; jA < dmdp.numJActions; jA++) {
				expectedReward += dmdp.getReward(jT, jA) * nextAction.getProb(jT, jA) * beliefProb;
			}
		}
		return expectedReward;
	}

	/**
	 * Iterates over the supports. NOTE: the iterator is live — removing
	 * through it mutates this value function.
	 */
	@Override
	public Iterator<ValueVector> iterator() {
		return valueFunction.iterator();
	}
}