package edu.gatech.cc.liam.marl.decpomdp;

import edu.gatech.cc.liam.core.Globals;
import edu.gatech.cc.liam.core.rl.models.CartesianIterator;
import edu.gatech.cc.liam.core.rl.models.JointIterator;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecPOMDPPolicy;
import edu.gatech.cc.liam.geometry.linear.NPoint;

/**
 * A DecPOMDP policy that acts via a "public/private level-0" (PPL0)
 * approximation: the underlying problem is approximated by a DecMDP over
 * joint agent types (built by {@link PPL0Approximator}), whose value
 * function is computed by {@link DMDPValueIteration}.
 *
 * <p>The policy tracks two pieces of state between calls:
 * <ul>
 *   <li>{@code currentPublicB} — a public belief over joint types in the
 *       DecMDP approximation, advanced each step via
 *       {@code ppa.approxP.getSuccessorBelief};</li>
 *   <li>{@code currentApproxTypes} — each agent's current approximate type,
 *       re-estimated each step by projecting the agent's updated level-0
 *       belief onto the closest type ({@code ppa.getClosestType}).</li>
 * </ul>
 *
 * <p>NOTE(review): this class is stateful and not thread-safe; callers must
 * invoke {@link #getInitialActions(Belief)} once before any
 * {@link #getActions(int[])} calls, since the latter reads
 * {@code lastActionPerType} set by {@code findActions()}.
 */
public class PPL0ApproxPolicy implements DecPOMDPPolicy {

	private final PPL0Approximator ppa;
	private final DMDPValueIteration approxVF;

	private Belief currentPublicB; // belief over joint-types in the DecMDP approximation
//	private Belief[] currentPrivateL0B; // level-0 beliefs across the state of the DecPOMDP problem
	private int[] currentApproxTypes; // current approximate type per agent (indexed without nature)
	
	private int[][] lastActionPerType; // [agent+1 (slot 0 = nature)][type] = action chosen last step
	
	private int numAgents; // number of agents in the base problem (excluding nature)
	
	/**
	 * @param ppa      approximator providing the type-space DecMDP, type
	 *                 projections, and per-type level-0 beliefs
	 * @param approxVF value iteration solver for the approximate DecMDP;
	 *                 {@code computeValueAtBelief} is assumed to populate the
	 *                 belief's {@code supportVector} (TODO confirm — relied on
	 *                 by {@code findActions()} and {@code getActions()})
	 */
	public PPL0ApproxPolicy(PPL0Approximator ppa, DMDPValueIteration approxVF) {
		super();
		this.ppa = ppa;
		this.approxVF = approxVF;
		numAgents = ppa.problem.numAgents;
	}

	/***
	 * Initializes the policy from the problem's starting belief and returns
	 * the first joint action (one action per agent, nature excluded).
	 *
	 * <p>Maps the DecPOMDP starting belief into the approximate type space
	 * (public belief), evaluates the value function there, and assigns each
	 * agent the type closest to the starting belief.
	 *
	 * @param startingB starting belief over DecPOMDP states
	 * @return joint action indexed by agent (no nature slot)
	 */
	@Override
	public int[] getInitialActions(Belief startingB) {
		currentPublicB = new Belief(ppa.getStartingDist(startingB.values));
		approxVF.computeValueAtBelief(currentPublicB);
		currentApproxTypes = new int[ppa.problem.numAgents];
		for(int i=0; i<numAgents; i++) {
			currentApproxTypes[i] = ppa.getClosestType(startingB, i);
		}
		return findActions();
	}
	
	/**
	 * Advances the policy one step given each agent's latest observation and
	 * returns the next joint action.
	 *
	 * <p>First updates each agent's approximate type: for every joint type
	 * consistent with the agent's own current type, it looks up the joint
	 * action that was taken last step, computes the resulting level-0 belief,
	 * and averages these beliefs weighted by the public belief's probability
	 * of that joint type. The agent's type is then re-projected from the
	 * averaged belief. Finally, the public belief itself is advanced and
	 * re-evaluated.
	 *
	 * @param observations per-agent observation indices from the last step
	 * @return joint action indexed by agent (no nature slot)
	 */
	@Override
	public int[] getActions(int[] observations) {
		// update private beliefs 
		// (based on previous policy choices, which depends on previous public belief)
		int[][] JtJa = getJAforeachJT();
		// for each joint-type (where the agent is their correct type)
	    // compute the joint action taken and compute the resulting belief state for that agent.
		// Average these beliefs by the prob of the joint-type.
		for(int a=0; a<numAgents; a++) {
			// level 0 belief is distribution over states in the DecPOMDP
			NPoint newL0B = new NPoint(ppa.problem.numStates, 0.0); 
			int[] numT = ppa.approxP.typeSS.clone();
			numT[a+1] = 1; 								  // don't iterate over i's types
			// NOTE(review): jT is mutated in place below — assumes CartesianIterator
			// yields a fresh (or safely reusable) array each iteration; verify.
			for(int[] jT : new CartesianIterator(numT)) { // for each joint-type_{-i)
				jT[a+1] = currentApproxTypes[a];
				int jTi = JointIterator.getJointIndex(ppa.approxP.typeSS, jT);
				if(currentPublicB.values[jTi] < Globals.TOLERANCE)
					continue; // no chance this joint-type happened
				int obs = observations[a];
				int jAi = JointIterator.getJointIndex(ppa.problem.actionSS, JtJa[jTi]);
				double[] l0belief = ppa.l0Beliefs[a].get(currentApproxTypes[a]).values;
				NPoint nextBforJT = ppa.problem.getNextL0Belief(a, l0belief, obs, jAi);
				nextBforJT.scale(currentPublicB.values[jTi]); // weight by joint-type probability
				newL0B.add(nextBforJT);
			}
			// normalize the weighted average back to a distribution
			// NOTE(review): if no joint-type consistent with agent a's type has
			// mass above TOLERANCE, l1norm() is 0 and this divides by zero — confirm
			// the public belief always has support on the current types.
			newL0B.scale(1.0/newL0B.l1norm());
			currentApproxTypes[a] = ppa.getClosestType(newL0B, a);
		}
		// update the current public belief
		double[][] jTjADistrPolicy = currentPublicB.supportVector.jTjADistrPolicy;
		currentPublicB = ppa.approxP.getSuccessorBelief(currentPublicB, jTjADistrPolicy);
		approxVF.computeValueAtBelief(currentPublicB);
		// now that beliefs have been updated, find the actions
		return findActions();
	}

	/**
	 * Expands {@code lastActionPerType} into a per-joint-type joint action:
	 * for each joint type, the action each agent's type prescribed last step.
	 *
	 * @return array [joint-type index][agent] = action (nature excluded from
	 *         the agent dimension, included in the joint-type index)
	 */
	private int[][] getJAforeachJT() { // find last joint-actions per joint-type
		int[][] JtJa = new int[ppa.approxP.numJTypes][numAgents];
		for(int[] jT : new CartesianIterator(ppa.approxP.typeSS)) {
			int jTi = JointIterator.getJointIndex(ppa.approxP.typeSS, jT);
			for(int a=0; a<numAgents; a++) { 
				JtJa[jTi][a] = lastActionPerType[a+1][jT[a+1]];  // jT includes nature, JtJa does not
			}
		}
		return JtJa;
	}

	/**
	 * Extracts the joint action for the agents' current approximate types
	 * from the one-step policy at the current public belief.
	 *
	 * <p>Side effect: stores the full per-type action table in
	 * {@code lastActionPerType}, which the next {@link #getActions(int[])}
	 * call uses to reconstruct what each joint type would have done.
	 *
	 * @return joint action indexed by agent (no nature slot)
	 */
	private int[] findActions() {
		// assume that beliefs have been updated, get best one step policy
		double[][] jTjADistrPolicy = currentPublicB.supportVector.jTjADistrPolicy;
		// find actions for that policy
		this.lastActionPerType = ppa.approxP.getAgentActions(jTjADistrPolicy, currentPublicB);
		int[] jA = new int[numAgents];
		for(int i=0; i<numAgents; i++) {
			// find out which action that agent would take (skip nature)
			jA[i] = lastActionPerType[i+1][currentApproxTypes[i]];
			assert jA[i] != -1; // -1 would mean no action assigned for this type
		}
		return jA;
	}
	
	
}
