package edu.gatech.cc.liam.core.rl.models.decpomdp;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import edu.gatech.cc.liam.core.rl.models.CartesianIterator;
import edu.gatech.cc.liam.core.rl.models.JointIterator;
import edu.gatech.cc.liam.geometry.linear.NPoint;
import edu.gatech.cc.liam.geometry.linear.Vector;

public class DecMDPFactory {

	
	/**
	 * Builds the two-agent ("Dec-") tiger problem, flattened into a joint-type MDP.
	 *
	 * Agent 0 is a dummy chance agent that only carries the tiger position (its sole
	 * action is "noop"); agents 1 and 2 each choose among open-left / open-right /
	 * listen. A joint state ("joint type") is the triple (tiger position, agent-1
	 * observation-history type, agent-2 observation-history type); each playing
	 * agent's type set is a start state, an end state, and every growl-observation
	 * history of length 1..horizon.
	 *
	 * @param horizon maximum observation-history length; once either agent's history
	 *                reaches this length the game is forced into a fixed successor
	 * @return the assembled DecMDP (types, per-agent actions, joint transition
	 *         function, joint reward function, start distribution, discount factor)
	 */
	public static DecMDP makeMultiAgentTiger(int horizon) {
		final double PROB_CORRECT_OBS = 0.85; // chance a listening agent hears the growl on the correct side
		final int END_STATE_INDEX = 1; // index of "end-state" within each playing agent's type list
		final double DISCOUNT_FACTOR = 0.9;
		List<String> aSet =  Arrays.asList(new String[]{"open-left", "open-right", "listen"});
		// actions[0] is the dummy chance agent; agents 1 and 2 share the same action set
		List<String>[] actions = new List[]{Arrays.asList(new String[]{"noop"}), aSet, aSet};
		String[] observations = new String[]{"growl-left", "growl-right"};
		// ts holds each agent's type space as raw observation histories;
		// tsStrings holds the matching human-readable labels (same indices).
		ArrayList<int[]> ts = new ArrayList<int[]>();
		ArrayList<String> tsStrings = new ArrayList<String>();
		ts.add(new int[]{}); // type 0: start-state (empty history)
		ts.add(new int[]{}); // type 1: end-state (also an empty history; only the index distinguishes it)
		tsStrings.add("start-state");
		tsStrings.add("end-state");
		// Append every observation history of length 1..horizon.
		for(int histLength = 1; histLength<=horizon; histLength++) {
			int[] obsSetsSizes = new int[histLength];
			for(int i=0; i<histLength; i++) {
				obsSetsSizes[i] = observations.length;
			}
			// Cartesian product: all observation sequences of this length.
			for(int[] obs : new CartesianIterator(obsSetsSizes)) {
				ts.add(obs);
				tsStrings.add(Arrays.toString(obs));
			}
		}
		// Joint type = (tiger position, agent-1 type, agent-2 type).
		List<String>[] types = new List[]{Arrays.asList(new String[]{"tiger-left", "tiger-right"}),
										  tsStrings, tsStrings};
		int[] numTypes = new int[]{types[0].size(), types[1].size(), types[2].size()};
		int numJTypes = types[0].size() * types[1].size() * types[2].size();
		// Joint action index encodes (agent-1 action, agent-2 action) as 3*a1 + a2.
		int numJActions = 9; // 0: {oL, oL} 1: {oL, oR} 2: {oL, l} 3: {oR, oL} ....
		NPoint[][] transF = new NPoint[numJTypes][numJActions];
		for(int i=0; i<numJTypes; i++) {
			for(int j=0; j<numJActions; j++) {
				transF[i][j] = new NPoint(numJTypes); // all-zero distribution, filled in below
			}
		}
		double[][] rewardFunction = new double[numJTypes][numJActions];
		// make transition probs
		int jTi = -1;
		for(int[] jT : new CartesianIterator(numTypes)) {
			jTi ++;
			// Enumeration order must match getJointIndex so jTi is the row index of jT.
			assert(jTi == CartesianIterator.getJointIndex(numTypes, jT));
			// Debug trace of the enumerated joint state space.
			System.out.println("State " + jTi + " is: " +
							   Arrays.toString(JointIterator.getObjects(types, jT)));
			if(jT[1] == END_STATE_INDEX && jT[2] == END_STATE_INDEX) {
				// double end state
				// NOTE(review): the absorbing end state keeps paying -2 every step; with a 0.9
				// discount this accumulates negative value — confirm a 0 reward wasn't intended.
				for(int jA = 0; jA<9; jA++) { // NOTE(review): literal 9 duplicates numJActions
					transF[jTi][jA].values[jTi] = 1.0; // stay in same state
					rewardFunction[jTi][jA] = -2.0; // listen for now;
				}
				continue;
			}
			for(int jA = 0; jA<9; jA++) { // NOTE(review): literal 9 duplicates numJActions
				int tigerPos = jT[0]; // 0 = tiger-left, 1 = tiger-right
				// the game continues if both listen and the horizon hasn't been reached
				if(ts.get(jT[1]).length >= horizon || ts.get(jT[2]).length >= horizon) { 
					// NOTE(review): the hard-coded joint index 10 contradicts the trailing
					// comment ("type 0 is start state") — verify 10 really is the intended
					// horizon-exceeded successor for every horizon value.
					transF[jTi][jA].values[10] = 1.0; // type 0 is start state with tiger-left 
				} else {
					if(jA == 8) {
						// Both agents listen: each independently hears the correct growl with
						// probability PROB_CORRECT_OBS. The four successors extend the two
						// histories with (left,left), (right,left), (left,right), (right,right).
						int sJTLeftLeft = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jT, jA, 0, 0);
						int sJTRightLeft = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jT, jA, 1, 0);
						int sJTLeftRight = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jT, jA, 0, 1);
						int sJTRightRight = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jT, jA, 1, 1);
						if(tigerPos == 0) { // tiger is actually LEFT
							transF[jTi][jA].values[sJTLeftLeft] += PROB_CORRECT_OBS * PROB_CORRECT_OBS; // both correct
							transF[jTi][jA].values[sJTRightLeft] += PROB_CORRECT_OBS * (1 - PROB_CORRECT_OBS);
							transF[jTi][jA].values[sJTLeftRight] += PROB_CORRECT_OBS * (1 - PROB_CORRECT_OBS);
							transF[jTi][jA].values[sJTRightRight] += (1 - PROB_CORRECT_OBS) * (1 - PROB_CORRECT_OBS);  
						} else { // tiger is actually RIGHT
							transF[jTi][jA].values[sJTLeftLeft] += (1 - PROB_CORRECT_OBS) * (1 - PROB_CORRECT_OBS); 
							transF[jTi][jA].values[sJTRightLeft] += PROB_CORRECT_OBS * (1 - PROB_CORRECT_OBS);
							transF[jTi][jA].values[sJTLeftRight] += PROB_CORRECT_OBS * (1 - PROB_CORRECT_OBS);
							transF[jTi][jA].values[sJTRightRight] += PROB_CORRECT_OBS * PROB_CORRECT_OBS; // both correct					
						}
					} else {
						// At least one door was opened. Eight outcomes are spread at
						// 0.5^3 = 0.125 each (2 tiger positions x 2 observations per agent,
						// via the flipped-tiger copy jTb below); += accumulates probability
						// because getNextJointIndex forces an opener's type to
						// END_STATE_INDEX, collapsing distinct observation outcomes onto
						// the same joint successor index.
						int sJTLeftLeft = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jT, jA, 0, 0);
						int sJTRightLeft = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jT, jA, 1, 0);
						int sJTLeftRight = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jT, jA, 0, 1);
						int sJTRightRight = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jT, jA, 1, 1);
						transF[jTi][jA].values[sJTLeftLeft] += 0.125;
						transF[jTi][jA].values[sJTRightLeft] += 0.125;
						transF[jTi][jA].values[sJTLeftRight] += 0.125;
						transF[jTi][jA].values[sJTRightRight] += 0.125;  
						// Same four outcomes with the tiger behind the other door.
						int[] jTb = jT.clone();
						jTb[0] = 1-jTb[0];
						sJTLeftLeft = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jTb, jA, 0, 0);
						sJTRightLeft = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jTb, jA, 1, 0);
						sJTLeftRight = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jTb, jA, 0, 1);
						sJTRightRight = getNextJointIndex(ts, numTypes, END_STATE_INDEX, jTb, jA, 1, 1);
						transF[jTi][jA].values[sJTLeftLeft] += 0.125;
						transF[jTi][jA].values[sJTRightLeft] += 0.125;
						transF[jTi][jA].values[sJTLeftRight] += 0.125;
						transF[jTi][jA].values[sJTRightRight] += 0.125;  
					}
				}
	//					// otherwise the game resets
//					transF[jTi][jA][0] = 0.5; // type 0 is start state with tiger-left 
//					transF[jTi][jA][1] = 0.5; // type 1 is start state with tiger-right
					// otherwise the game ends
	//					transF[jTi][jA][END_STATE_INDEX] = 1.0; // type 0 is start state with tiger-left 
				boolean tigerLeft = (tigerPos == 0);
				// Joint reward per joint action (jA = 3*a1 + a2). The numbers follow a
				// tiger-style payoff table (tiger door heavily penalized, treasure door
				// rewarded, -1 per listener) — NOTE(review): confirm against the intended
				// Dec-Tiger benchmark specification.
				switch (jA) {
				case 0: // {oL, oL}
					rewardFunction[jTi][jA] = tigerLeft ? -50 : 20;
					break;
				case 1: // {oL, oR}
					rewardFunction[jTi][jA] = -100;
					break;
				case 2: // {oL, l}
					rewardFunction[jTi][jA] = tigerLeft ? -101 : 9;
					break;
				case 3: // {oR, oL}
					rewardFunction[jTi][jA] = -100;
					break;
				case 4: // {oR, oR}
					rewardFunction[jTi][jA] = !tigerLeft ? -50 : 20;
					break;
				case 5: // {oR, l}
					rewardFunction[jTi][jA] = !tigerLeft ? -101 : 9;
					break;
				case 6: // {l, oL}
					rewardFunction[jTi][jA] = tigerLeft ? -101 : 9;
					break;
				case 7: // {l, oR}
					rewardFunction[jTi][jA] = !tigerLeft ? -101 : 9;
					break;
				case 8: // {l, l}
					rewardFunction[jTi][jA] = -2;
					break;
				default:
					assert(false);
					break;
				}
			}
		}
		double[] startDistr = new double[numJTypes];
		// Uniform over the tiger position, both agents in their start-state.
		// (Assumes joint indices 0 and 1 are exactly those two states — this depends
		// on CartesianIterator.getJointIndex ordering; TODO confirm.)
		startDistr[0] = 0.5;
		startDistr[1] = 0.5;
		return new DecMDP(types, actions, transF, rewardFunction, startDistr, DISCOUNT_FACTOR);
	}
	
	/**
	 * Computes the joint-type index of the successor of {@code jT} under joint
	 * action {@code jA}, given the new observation each agent would receive.
	 *
	 * The tiger position is carried over unchanged; each agent's observation
	 * history is extended by its new observation and resolved back to a type
	 * index. An agent that opened a door (or was already done) is forced into
	 * the end state instead.
	 *
	 * @param ts              per-agent type space (observation histories)
	 * @param numTypes        sizes of the three type sets
	 * @param END_STATE_INDEX index of the terminal type
	 * @param jT              current joint type (tiger, agent-1 type, agent-2 type)
	 * @param jA              joint action, encoded as 3*action1 + action2
	 * @param obsForAgent1    observation appended to agent 1's history
	 * @param obsForAgent2    observation appended to agent 2's history
	 * @return index of the successor joint type
	 */
	private static int getNextJointIndex(ArrayList<int[]> ts, int[] numTypes, int END_STATE_INDEX,
										 int[] jT, int jA, int obsForAgent1, int obsForAgent2) {
		// Grow each agent's observation history and map it back to a type index.
		int[] grownHist1 = makeNextObsHistory(ts.get(jT[1]), obsForAgent1);
		int[] grownHist2 = makeNextObsHistory(ts.get(jT[2]), obsForAgent2);
		int nextType1 = getType(ts, grownHist1);
		int nextType2 = getType(ts, grownHist2);
		// Action decoding: agent 1 listened only for jA >= 6; agent 2 listened
		// only for jA in {2, 5, 8} (i.e. jA % 3 == 2).
		boolean agent1Opened = jA < 6;
		boolean agent2Opened = jA != 2 && jA != 5 && jA != 8;
		if (agent1Opened || jT[1] == END_STATE_INDEX) {
			nextType1 = END_STATE_INDEX; // opening a door ends agent 1's game
		}
		if (agent2Opened || jT[2] == END_STATE_INDEX) {
			nextType2 = END_STATE_INDEX; // opening a door ends agent 2's game
		}
		int[] nextJointType = new int[]{jT[0], nextType1, nextType2};
		return CartesianIterator.getJointIndex(numTypes, nextJointType);
	}
	
	/**
	 * Looks up the type index of an observation history.
	 *
	 * @param ts         list of known observation histories; the list position is the type id
	 * @param obsHistory the history to resolve
	 * @return the index of the first entry of {@code ts} element-equal to {@code obsHistory}
	 * @throws IllegalArgumentException if the history is unknown. (Previously this
	 *         returned -1 when assertions were disabled, which surfaced later as an
	 *         ArrayIndexOutOfBoundsException far from the real cause.)
	 */
	private static int getType(ArrayList<int[]> ts, int[] obsHistory) {
		for(int typePos = 0; typePos<ts.size(); typePos++) {
			if(Arrays.equals(ts.get(typePos), obsHistory))
				return typePos;
		}
		// An unknown history is a modeling error: fail fast with a descriptive
		// message instead of relying on -ea or a bogus -1 sentinel.
		throw new IllegalArgumentException(
				"unknown observation history: " + Arrays.toString(obsHistory));
	}
	
	/**
	 * Returns a fresh history equal to {@code lastObsHist} with {@code newObs}
	 * appended. The input array is never modified.
	 *
	 * @param lastObsHist history so far (may be empty)
	 * @param newObs      observation index to append
	 * @return a new array of length {@code lastObsHist.length + 1}
	 */
	private static int[] makeNextObsHistory(int[] lastObsHist, int newObs) {
		int oldLength = lastObsHist.length;
		int[] extended = new int[oldLength + 1];
		System.arraycopy(lastObsHist, 0, extended, 0, oldLength);
		extended[oldLength] = newObs;
		return extended;
	}
	
	/**
	 * Smoke test: builds multi-agent tiger models for horizons 1 through 3
	 * (which prints the enumerated state spaces) and then dumps each model.
	 */
	public static void main(String[] args) {
		// Build all three models first, then print them, to keep the original
		// ordering of construction output versus model dumps.
		DecMDP[] models = new DecMDP[]{
				makeMultiAgentTiger(1),
				makeMultiAgentTiger(2),
				makeMultiAgentTiger(3)
		};
		for (DecMDP model : models) {
			System.out.println(model);
		}
	}
}
