package edu.gatech.cc.liam.core.rl.models.decpomdp;

import java.util.ArrayList;
import java.util.List;

import edu.gatech.cc.liam.core.DiscreteDistribution;
import edu.gatech.cc.liam.core.Globals;
import edu.gatech.cc.liam.core.rl.models.CartesianIterator;
import edu.gatech.cc.liam.core.rl.models.JointIterator;
import edu.gatech.cc.liam.geometry.linear.NPoint;
import edu.gatech.cc.liam.marl.decpomdp.Belief;
import edu.gatech.cc.liam.marl.decpomdp.PosgInputFileParser;

/**
 * A decentralized POMDP (Dec-POMDP) model: a set of agents acting jointly in a
 * partially observable environment with a single shared reward signal.
 *
 * Index conventions used throughout:
 * joint actions and joint observations are flattened to a single integer index
 * via {@code JointIterator.getJointIndex} over the per-agent space sizes
 * ({@link #actionSS}, {@link #obsSS}).
 */
public class DecPOMDP {
	public final double discount;
	public final List<String> states;  // state names, indexed by state id
	public final List<String>[] actions; // names of actions per player
	public final List<String>[] observations; // names of observations per player
	private final double[][][] transitionFunction; // prob of [start-state][joint-action][end-state]
	private final double[][] rewardFunction; // expected reward of [start-state][joint-action]
	private final double[][][] obsFunction; // probability of [joint-action][end-state][joint-observation]
	public final double[] startDistr; // initial distribution over states
	
	//derived members
	public final int numAgents;
	public final int numStates;
	public final int numJActions; // size of the joint action space (product of per-agent sizes)
	public final int numJObs;     // size of the joint observation space (product of per-agent sizes)
	public final int[] actionSS;  // per-agent action space sizes
	public final int[] obsSS;     // per-agent observation space sizes
	public final double[][][][] individualObsF; // [agent][observation][end-state][joint-action] =
	                                            // marginal prob of [agent] seeing [observation]
	                                            // when in [end-state] after taking [joint-action]

	/**
	 * Builds a Dec-POMDP directly from its component tables.
	 *
	 * @param discount per-step time discount factor
	 * @param states state names, indexed by state id
	 * @param observations per-agent observation names
	 * @param actions per-agent action names
	 * @param transitionFunction P(end-state | start-state, joint-action) as [start][joint-action][end]
	 * @param rewardFunction expected reward as [start-state][joint-action]
	 * @param obsFunction P(joint-obs | joint-action, end-state) as [joint-action][end][joint-obs]
	 * @param startDistr initial probability distribution over states
	 */
	public DecPOMDP(double discount, List<String> states, List<String>[] observations,
			List<String>[] actions, double[][][] transitionFunction,
			double[][] rewardFunction, double[][][] obsFunction,
			double[] startDistr) {
		this.discount = discount;
		this.states = states;
		this.actions = actions;
		this.observations = observations;
		this.transitionFunction = transitionFunction;
		this.rewardFunction = rewardFunction;
		this.obsFunction = obsFunction;
		this.startDistr = startDistr;
		
		this.numAgents = actions.length;
		this.numStates = states.size();
		int jAs = 1;
		int jOs = 1;
		actionSS = new int[numAgents];
		obsSS = new int[numAgents];
		for(int i=0; i<numAgents; i++){
			actionSS[i] = actions[i].size();
			obsSS[i] = observations[i].size();
			jAs *= actionSS[i];
			jOs *= obsSS[i];
		}
		this.numJActions = jAs;
		this.numJObs = jOs;
		individualObsF = makeObservationPrior();
	}
	
	/**
	 * Builds a Dec-POMDP from a parsed POSG model file.
	 *
	 * Note: the parser stores transitions as [joint-action][start][end]; this
	 * class stores them as [start][joint-action][end], hence the re-indexing
	 * copy below. Per-(observation) rewards are collapsed to their expectation
	 * per [state][joint-action], which is decision-theoretically equivalent.
	 *
	 * @param pifp a parser that has already read the model file
	 */
	@SuppressWarnings("unchecked") // generic array creation for the per-agent name lists
	public DecPOMDP(PosgInputFileParser pifp) {
		// discount
		this.discount = pifp.getDiscount();
		// agents
		this.numAgents = pifp.getAgents();
		// states
		this.numStates = pifp.getStates();
		this.states = new ArrayList<String>();
		for(int i=0; i<numStates; i++) {
			states.add(pifp.getStateName(i));
		}
		// actions
		this.numJActions = pifp.getActions();
		this.actions = new ArrayList[numAgents];
		this.actionSS = new int[numAgents];
		for(int i=0; i<numAgents; i++) {
			actions[i] = new ArrayList<String>();
			actionSS[i] = pifp.actions[i].length;
			for(String s : pifp.actions[i])
				actions[i].add(s);
		}
		// observations
		this.numJObs = pifp.getObs();
		this.observations = new ArrayList[numAgents];
		this.obsSS = new int[numAgents];
		for(int i=0; i<numAgents; i++) {
			observations[i] = new ArrayList<String>();
			obsSS[i] = pifp.observations[i].length;
			for(String s : pifp.observations[i])
				observations[i].add(s);
		}
		// transitionFunction: re-index from the parser's [ja][s1][s2] layout
		transitionFunction = new double[numStates][numJActions][numStates];
		double[][][] pifpTrans = pifp.getTrnProbTable();
		for(int s1=0; s1<numStates; s1++) {
			for(int ja=0; ja<numJActions; ja++) {
				for(int s2=0; s2<numStates; s2++) {
					transitionFunction[s1][ja][s2] = pifpTrans[ja][s1][s2];
				}
			}
		}
		// observation function
		obsFunction = pifp.getObsProbTable();
		// reward function (take the expectation, which is decision theoretic equivalent)
		rewardFunction = new double[numStates][numJActions];
		double[][][][] pifpRewards = pifp.getRwdProbTable();
		for(int s1=0; s1<numStates; s1++) {
			for(int ja=0; ja<numJActions; ja++) {
				for(int s2=0; s2<numStates; s2++) {
					for(int ob=0; ob<numJObs; ob++) {
						// E[r | s1, ja] = sum over (s2, ob) of r(s1,ja,s2,ob) * P(s2|s1,ja) * P(ob|ja,s2)
						rewardFunction[s1][ja] += pifpRewards[ja][s1][s2][ob] * 
												  pifpTrans[ja][s1][s2] *
												  obsFunction[ja][s2][ob];
					}
				}
			}
		}
		roundRewardsToTolerance();
		// start distribution
		this.startDistr = pifp.getStartValues();
		// generated prior
		individualObsF = makeObservationPrior();
	}
	
	/**
	 * Snaps any expected reward within Globals.TOLERANCE of an integer to that
	 * integer, removing floating-point noise introduced by the expectation sum.
	 */
	private void roundRewardsToTolerance() {
		for(int i=0; i<numStates; i++) {
			for(int j=0; j<numJActions; j++) {
				long rounded = Math.round(rewardFunction[i][j]);
				if(Math.abs(rewardFunction[i][j] - rounded) < Globals.TOLERANCE)
					rewardFunction[i][j] = rounded;
			}
		}
	}
	
	/**
	 * @param actionInds one action index per agent, in agent order
	 * @return the flattened joint-action index
	 */
	public int getJointActionIndex(int[] actionInds) {
		return JointIterator.getJointIndex(actionSS, actionInds);
	}
	
	/**
	 * @param startState index of the state the transition starts from
	 * @param jActions joint-action index
	 * @param nextState index of the candidate successor state
	 * @return P(nextState | startState, jActions)
	 */
	public double getTransProb(int startState, int jActions, int nextState) {
		return transitionFunction[startState][jActions][nextState];
	}

	/**
	 * @param jActions joint-action index
	 * @param state index of the state the joint action ended in
	 * @param jObservation joint-observation index
	 * @return P(jObservation | jActions, state)
	 */
	public double getObsProb(int jActions, int state, int jObservation) {
		return obsFunction[jActions][state][jObservation];
	}

	/**
	 * @param startState index of the state the joint action was taken in
	 * @param jActions joint-action index
	 * @return expected immediate reward for taking jActions in startState
	 */
	public double getReward(int startState, int jActions) {
		return rewardFunction[startState][jActions];
	}
	
	/**
	 * Computes an agent's level-0 Bayesian belief update:
	 * b'(s') is proportional to O_i(o | s', a) * sum_s T(s' | s, a) * b(s),
	 * where O_i is the agent's marginal observation function
	 * ({@link #individualObsF}).
	 *
	 * @param agent index of the updating agent
	 * @param agentBelief the agent's current belief over states (length numStates)
	 * @param curOi index of the agent's individual observation just received
	 * @param jAi joint-action index just taken
	 * @return the normalized successor belief
	 */
	public NPoint getNextL0Belief(int agent, double[] agentBelief, int curOi, int jAi) {
		NPoint successorB = new NPoint(numStates, 0.0);
		for(int nextS=0; nextS<numStates; nextS++) {
			// b'(s')|o = p(o|s', a) * sum_s( p(s' | s,a) * b(s) )
			for(int startS=0; startS<numStates; startS++) {
				// for each possible starting state, what is the probability of 
				// the next state given the observation
				double nextSProb = getTransProb(startS, jAi, nextS) * agentBelief[startS];
				successorB.values[nextS] += nextSProb;
			}
			successorB.values[nextS] *= individualObsF[agent][curOi][nextS][jAi];
		}
		// normalize the probability distribution; a zero norm means the observation
		// was impossible under this belief/action and would yield an all-NaN belief
		double norm = successorB.l1norm();
		assert norm > 0 : "zero-probability belief update: observation " + curOi
				+ " is impossible for agent " + agent + " after joint action " + jAi;
		successorB.scale(1.0/norm);
		return successorB;
	}

	/* 
	 * @return [agent][observation][state][joint-action] = 
	 * probability of [agent] seeing [observation] when in [state] after taking [joint-action],
	 * obtained by marginalizing the joint observation function over the other
	 * agents' observation components.
	 */
	private double[][][][] makeObservationPrior() {
		double[][][][] obsPrior = new double[numAgents][][][];
		for(int i=0; i < numAgents; i++) {
			obsPrior[i] = new double[obsSS[i]][numStates][numJActions];
			// sum P(joint-obs | ja, endS) over every joint observation whose
			// i-th component matches, giving agent i's marginal observation prob
			for(int[] jO : new CartesianIterator(obsSS)) {
				int jOi = JointIterator.getJointIndex(obsSS, jO);
				for(int endS=0; endS < numStates; endS++) {
					for(int jAi=0; jAi < numJActions; jAi++) {
						obsPrior[i][jO[i]][endS][jAi] += obsFunction[jAi][endS][jOi];
					}
				}
			}
		}
		return obsPrior;
	}

}
