package edu.gatech.cc.liam.core.rl.models.decpomdp;

import edu.gatech.cc.liam.core.Globals;
import edu.gatech.cc.liam.core.rl.models.CartesianIterator;
import edu.gatech.cc.liam.marl.decpomdp.Belief;


/**
 * A single-stage joint policy for a {@link DecMDP}: for every joint type it holds a
 * probability distribution over joint actions (indexed through {@code dmdp.noNature},
 * i.e. excluding nature's component).
 */
public class DecMDPSingleStagePolicy {
	/** Sentinel string marking agent-types that are impossible under the current belief. */
	private static final String NOT_APPLICABLE = "n/a";

	private final DecMDP dmdp;
	// jTjAProbs[dmdp.noNature.getVarIndex(jT, jA)] = P(joint action jA | joint type jT)
	private final double[] jTjAProbs;

	/**
	 * Wraps an existing (possibly stochastic) joint policy.
	 *
	 * @param dmdp      the Dec-MDP this policy belongs to
	 * @param jTjAProbs flat probability array indexed by {@code dmdp.noNature.getVarIndex(jT, jA)};
	 *                  must sum to 1 over actions for every joint type (checked by {@link #validate()})
	 */
	public DecMDPSingleStagePolicy(DecMDP dmdp, double[] jTjAProbs) {
		this.dmdp = dmdp;
		this.jTjAProbs = jTjAProbs;
		assert validate();
	}

	/**
	 * Builds a deterministic policy from per-agent type-to-action assignments.
	 *
	 * @param dmdp       the Dec-MDP this policy belongs to
	 * @param typeAtions {@code typeAtions[agent][type]} = action index taken by that agent-type
	 */
	public DecMDPSingleStagePolicy(DecMDP dmdp, int[][] typeAtions) {
		this.dmdp = dmdp;
		jTjAProbs = new double[dmdp.noNature.numJTypes * dmdp.noNature.numJActions];
		for (int[] jT : new CartesianIterator(dmdp.noNature.typeSS)) {
			// Assemble the joint action implied by each agent's type-conditioned choice.
			int[] jointTaken = new int[dmdp.noNature.numAgents];
			for (int i = 0; i < dmdp.noNature.numAgents; i++) {
				jointTaken[i] = typeAtions[i][jT[i]];
			}
			int index = dmdp.noNature.getVarIndex(jT, jointTaken);
			jTjAProbs[index] = 1.0; // deterministic: all mass on the single chosen joint action
		}
		assert validate();
	}

	/**
	 * Returns P(joint action | joint type) for the given joint indices.
	 *
	 * @param jTypeIndex   joint type index (dmdp-level, including nature — converted internally)
	 * @param jActionIndex joint action index
	 */
	public double getProb(int jTypeIndex, int jActionIndex) {
		int fullIndex = dmdp.getVarIndex(jTypeIndex, jActionIndex);
		return getProb(fullIndex);
	}

	/**
	 * Returns the probability stored for a full (type, action) variable index; the index is
	 * first projected to its no-nature equivalent.
	 */
	public double getProb(int fullIndex) {
		int noNatureIndex = dmdp.getNoNatureIndex(fullIndex);
		return jTjAProbs[noNatureIndex];
	}

	/**
	 * Checks that, for every joint type, the action probabilities sum to 1
	 * (within {@link Globals#TOLERANCE}).
	 *
	 * @return true iff the stored array is a valid conditional distribution
	 */
	public boolean validate() {
		for (int[] jT : new CartesianIterator(dmdp.noNature.typeSS)) {
			double sum = 0;
			for (int[] jA : new CartesianIterator(dmdp.noNature.actionSS)) {
				int index = dmdp.noNature.getVarIndex(jT, jA);
				sum += jTjAProbs[index];
			}
			if (Math.abs(sum - 1.0) > Globals.TOLERANCE)
				return false;
		}
		return true;
	}

	/**
	 * Computes the actions taken by each agent type, under a uniform belief over joint types.
	 * Assumes actions are taken deterministically (every joint-type has exactly one joint-action).
	 */
	public String getActionString() {
		return getActionString(Belief.makeUniformB(dmdp.numJTypes));
	}

	/**
	 * Renders, per agent and per type possible under belief {@code b}, the action(s) taken and
	 * their probabilities. Types impossible under {@code b} are omitted from the output.
	 *
	 * @param b belief over joint types used to restrict which agent-types are reported
	 * @return human-readable multi-line description, one line per agent
	 */
	public String getActionString(Belief b) {
		boolean[][] typesPossible = dmdp.whichTypesInBelief(b);
		int[] firstPossibleJT = dmdp.getFirstPossibleJType(b);
		// For each player and type find out which action(s) they take.
		String[][] typeAtions = new String[dmdp.numAgents][]; // typeAtions[agent][type] = rendered action(s)
		for (int agent = 0; agent < dmdp.numAgents; agent++) {
			typeAtions[agent] = new String[dmdp.typeSS[agent]];
			for (int t = 0; t < dmdp.typeSS[agent]; t++) {
				if (!typesPossible[agent][t]) {
					typeAtions[agent][t] = NOT_APPLICABLE;
					continue;
				}
				StringBuilder rendered = new StringBuilder();
				// Examine the joint type where all other agents are in their first possible type.
				int[] jT = firstPossibleJT.clone();
				jT[agent] = t;
				int jTi = dmdp.getJointTypeIndex(jT);
				for (int[] jA : new CartesianIterator(dmdp.actionSS)) {
					int jAi = dmdp.getJointActionIndex(jA);
					double p = getProb(jTi, jAi);
					if (p > Globals.TOLERANCE) {
						rendered.append('/')
								.append(dmdp.actions[agent].get(jA[agent]))
								.append(':')
								.append(Globals.decimal5.format(p));
					}
				}
				typeAtions[agent][t] = rendered.toString();
			}
		}
		StringBuilder actionStr = new StringBuilder();
		for (int agent = 0; agent < dmdp.numAgents; agent++) {
			actionStr.append("Player ").append(agent).append(": ");
			for (int t = 0; t < dmdp.typeSS[agent]; t++) {
				// NOTE: was `!= "n/a"` (reference comparison, only worked via String interning);
				// use equals() for a robust content comparison.
				if (!NOT_APPLICABLE.equals(typeAtions[agent][t]))
					actionStr.append("t:").append(dmdp.types[agent].get(t))
							 .append("->").append(typeAtions[agent][t]).append(" | ");
			}
			actionStr.append("\n");
		}
		return actionStr.toString();
	}

	/**
	 * Extracts the deterministic per-agent-type action choices under belief {@code b}.
	 * Asserts that each agent-type has at most one supported action (i.e. the policy is
	 * deterministic, as expected of optimal policies).
	 *
	 * @param b belief over joint types
	 * @return {@code result[agent][type]} = action index taken, or -1 if the type is impossible
	 *         under {@code b}
	 */
	public int[][] getAgentActions(Belief b) {
		boolean[][] typesPossible = dmdp.whichTypesInBelief(b);
		int[] firstPossibleJT = dmdp.getFirstPossibleJType(b);
		int[][] agentActions = new int[dmdp.numAgents][]; // agentActions[agent][type] = action ID taken
		for (int agent = 0; agent < dmdp.numAgents; agent++) {
			agentActions[agent] = new int[dmdp.typeSS[agent]];
			for (int t = 0; t < dmdp.typeSS[agent]; t++) {
				agentActions[agent][t] = -1; // -1 = type impossible / no action found
				if (!typesPossible[agent][t])
					continue;
				int[] jT = firstPossibleJT.clone();
				jT[agent] = t;
				int jTi = dmdp.getJointTypeIndex(jT);
				for (int[] jA : new CartesianIterator(dmdp.actionSS)) {
					int jAi = dmdp.getJointActionIndex(jA);
					if (this.getProb(jTi, jAi) > Globals.TOLERANCE) {
						assert (agentActions[agent][t] == -1 ||
								agentActions[agent][t] == jA[agent]); // only one action per agent-type
						// otherwise agent's policies are stochastic
						// (which shouldn't be the case for optimal policies)
						agentActions[agent][t] = jA[agent];
					}
				}
			}
		}
		return agentActions;
	}

	@Override
	public String toString() {
		return getActionString();
	}
}
