package edu.gatech.cc.liam.core.rl.models.decpomdp;

import java.io.ObjectInputStream.GetField;
import java.util.Arrays;
import java.util.List;

import edu.gatech.cc.liam.core.rl.models.CartesianIterator;
import edu.gatech.cc.liam.core.rl.models.JointIterator;

public class DecPOMDPFactory {

	/**
	 * Builds the classic two-agent Dec-Tiger benchmark problem.
	 *
	 * <p>Joint actions and joint observations are indexed row-major over agents:
	 * {@code jA = a0 * |A1| + a1} and {@code jO = o0 * |O1| + o1}, matching
	 * {@link JointIterator#getJointIndex}. State/observation index 0 is the
	 * "left" side, so {@code jObs[i] == state} means agent {@code i} heard the
	 * tiger behind the correct door.
	 *
	 * @return a fully specified Dec-Tiger {@link DecPOMDP} with discount 0.9,
	 *         two states, three actions and two observations per agent, and a
	 *         uniform start distribution over the tiger's position
	 */
	@SuppressWarnings("unchecked") // generic arrays of List<String>; sets are shared, never mutated
	public static DecPOMDP makeMATiger() {
		// Probability that a listening agent hears the tiger behind the correct door.
		final double PROB_CORRECT_OBS = 0.85;
		final double PROB_WRONG_OBS = 1 - PROB_CORRECT_OBS;
		final double DISCOUNT_FACTOR = 0.9;

		List<String> states = Arrays.asList("tiger-left", "tiger-right");
		// Both agents share the same action and observation sets.
		List<String> aSet = Arrays.asList("open-left", "open-right", "listen");
		List<String>[] actions = new List[]{aSet, aSet};
		List<String> oSet = Arrays.asList("hear-left", "hear-right");
		List<String>[] observations = new List[]{oSet, oSet};

		int numJActions = actions[0].size() * actions[1].size();
		int numStates = states.size();
		int[] obsSS = {observations[0].size(), observations[1].size()};
		int numJObs = obsSS[0] * obsSS[1];
		// "listen" is the last action of each agent, so the joint listen/listen
		// action is the last joint index (2 * 3 + 2 == numJActions - 1). Derived
		// here instead of hard-coding 8 in two places.
		final int listenListen = numJActions - 1;

		double[][][] transitionFunction = new double[numStates][numJActions][numStates];
		double[][] rewardFunction = new double[numStates][numJActions];
		for (int state = 0; state < numStates; state++) {
			for (int jA = 0; jA < numJActions; jA++) {
				if (jA == listenListen) {
					// Listening leaves the tiger where it is; both agents pay a small cost.
					transitionFunction[state][jA][state] = 1.0;
					rewardFunction[state][jA] = -2.0;
				} else {
					// Opening any door resets the tiger uniformly at random.
					transitionFunction[state][jA][0] = 0.5;
					transitionFunction[state][jA][1] = 0.5;
					boolean tigerLeft = (state == 0);
					switch (jA) {
					case 0: // {open-left, open-left}
						rewardFunction[state][jA] = tigerLeft ? -50 : 20;
						break;
					case 1: // {open-left, open-right}
						rewardFunction[state][jA] = -100;
						break;
					case 2: // {open-left, listen}
						rewardFunction[state][jA] = tigerLeft ? -101 : 9;
						break;
					case 3: // {open-right, open-left}
						rewardFunction[state][jA] = -100;
						break;
					case 4: // {open-right, open-right}
						rewardFunction[state][jA] = !tigerLeft ? -50 : 20;
						break;
					case 5: // {open-right, listen}
						rewardFunction[state][jA] = !tigerLeft ? -101 : 9;
						break;
					case 6: // {listen, open-left}
						rewardFunction[state][jA] = tigerLeft ? -101 : 9;
						break;
					case 7: // {listen, open-right}
						rewardFunction[state][jA] = !tigerLeft ? -101 : 9;
						break;
					default:
						// Fails even without -ea, unlike the original assert(false),
						// so a bad index can never silently leave a zero reward.
						throw new AssertionError("unexpected joint action index: " + jA);
					}
				}
			}
		}

		double[][][] obsFunction = new double[numJActions][numStates][numJObs];
		for (int jA = 0; jA < numJActions; jA++) {
			for (int state = 0; state < numStates; state++) {
				if (jA == listenListen) {
					// Observations are conditionally independent given the state:
					// each agent independently hears the correct side with
					// probability PROB_CORRECT_OBS.
					for (int[] jObs : new CartesianIterator(obsSS)) {
						int jOi = JointIterator.getJointIndex(obsSS, jObs);
						obsFunction[jA][state][jOi] =
								(jObs[0] == state ? PROB_CORRECT_OBS : PROB_WRONG_OBS)
								* (jObs[1] == state ? PROB_CORRECT_OBS : PROB_WRONG_OBS);
					}
				} else {
					// After any door opening, observations carry no information.
					for (int jOi = 0; jOi < numJObs; jOi++) {
						obsFunction[jA][state][jOi] = 1.0 / numJObs;
					}
				}
			}
		}

		double[] startDistr = {0.5, 0.5}; // uniform prior over the tiger's position
		return new DecPOMDP(DISCOUNT_FACTOR, states, observations, actions,
							transitionFunction, rewardFunction, obsFunction, startDistr);
	}
}
