package edu.gatech.cc.liam.marl.decpomdp;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import edu.gatech.cc.liam.core.PairComparable;
import edu.gatech.cc.liam.core.rl.models.CartesianIterator;
import edu.gatech.cc.liam.core.rl.models.JointIterator;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecMDP;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecMDPPolicy;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecPOMDP;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecPOMDPFactory;
import edu.gatech.cc.liam.geometry.linear.SparsePoint;

/**
 * Builds a {@link DecMDP} approximation of a {@link DecPOMDP} in which each
 * agent carries a small number of abstract "types" (finite memory states) and
 * an extra agent ("nature") carries the true hidden state as its type.
 *
 * Each base-problem step is simulated by TWO approximation steps:
 * a "normal" stage in which agents act in the base problem and each agent's
 * type absorbs the resulting observation, followed by a "belief contraction"
 * stage in which each agent's action selects the abstract type that its
 * (type, observation) pair collapses to. Rewards are paid only in the normal
 * stage, and the approximation discounts by the square root of the base
 * discount so that discounting per base step is unchanged.
 */
public class PPChoiceApprox {
	// Cap on sampling rounds in the belief generator, so addBeliefs()
	// terminates even when fewer than the requested number of distinct
	// beliefs are reachable.
	private final static int MAX_SAMPLE_ATTEMPTS = 500;
	
	DecPOMDP problem;        // base problem trying to solve
	DecMDP dmdp;             // the approximation of the problem
	int[] numTypesPerPlayer; // abstract-type budget per (real) agent
	
	/**
	 * Constructs the approximation immediately; the result is available
	 * through {@link #dmdp}.
	 *
	 * @param problem           the Dec-POMDP to approximate
	 * @param numTypesPerPlayer number of abstract types for each agent
	 */
	public PPChoiceApprox(DecPOMDP problem, int[] numTypesPerPlayer) {
		this.problem = problem;
		this.numTypesPerPlayer = numTypesPerPlayer;
		makePPChoiceApprox();
	}
	
	/**
	 * Builds {@link #dmdp}: type sets, action sets, transition function,
	 * reward function, and starting distribution.
	 */
	private void makePPChoiceApprox() {
		// Construct types. Agent 0 is nature; its types are the base states.
		// For a real agent i, types are laid out in blocks of size
		// (obsSS[i]+1): index t*(obsSS[i]+1) is the "normal" type t, and the
		// following obsSS[i] indices are the intermediate (t, observation)
		// pairs awaiting contraction.
		@SuppressWarnings("unchecked")
		ArrayList<String>[] types = new ArrayList[problem.numAgents + 1];  // an extra agent for nature
		types[0] = new ArrayList<String>();
		for(int s=0; s<problem.numStates; s++) {
			types[0].add(problem.states.get(s));
		}
		for(int i = 0; i<problem.numAgents; i++) {
			types[i+1] = new ArrayList<String>();
			for(int t=0; t < numTypesPerPlayer[i]; t++) {
				// Fix: include the "," so the label matches the
				// "t:<t>,o:<o>" format used for the intermediate types below.
				types[i+1].add("t:" + t + ",o:--");
				for(int o=0; o < problem.obsSS[i]; o++) {
					types[i+1].add("t:" + t + ",o:" + problem.observations[i].get(o));
				}
			}
		}
		// Actions: a single "noop" for nature; for each agent, the base
		// actions padded with noops up to numTypesPerPlayer[i] so that in the
		// contraction stage an action index can name any target type.
		@SuppressWarnings("unchecked")
		List<String>[] actions = new List[problem.numAgents + 1];
		actions[0] = Arrays.asList(new String[]{"noop"});
		for(int i=0; i < problem.numAgents; i++) {
			actions[i+1] = new ArrayList<String>();
			for(int a=0; a<problem.actionSS[i]; a++) {
				actions[i+1].add(a + ":" + problem.actions[i].get(a));
			}
			for(int a=problem.actionSS[i]; a < numTypesPerPlayer[i]; a++) {
				actions[i+1].add(a + ":noop");
			}
		}
		// discount should be the same every TWO steps, so the square root
		dmdp = new DecMDP(types, actions, Math.sqrt(problem.discount));

		// construct transition function
		SparsePoint[][] transitionFunction = 
			new SparsePoint[dmdp.numJTypes][dmdp.numJActions]; // [start-type][actions][end-type]
		for(int i=0; i<dmdp.numJTypes; i++) {
			for(int j=0; j<dmdp.numJActions; j++) {
				transitionFunction[i][j] = new SparsePoint(dmdp.numJTypes);
			}
		}
		for(int[] jStartT : new CartesianIterator(dmdp.typeSS)) {
			int jStartTi = JointIterator.getJointIndex(dmdp.typeSS, jStartT);
			switch (getJBeliefStage(jStartT)) {
			case 0:
				// Normal stage: map the (possibly padded) joint action onto a
				// base joint action, then enumerate next states x observations.
				for(int[] jA : new CartesianIterator(dmdp.actionSS)) {
					int jAi = JointIterator.getJointIndex(dmdp.actionSS, jA);
					// Drop nature's slot, then fold padded noop actions back
					// into the base action range (modulo actionSS[i]).
					int[] jARegularized = Arrays.copyOfRange(jA, 1, jA.length);
					makeRegularExpansionJA(jARegularized);
					int jABasei = JointIterator.getJointIndex(problem.actionSS, jARegularized);
					for(int nextTrueState = 0; nextTrueState < problem.numStates; nextTrueState++) {
						for(int[] jO : new CartesianIterator(problem.obsSS)) {
							int jOi = JointIterator.getJointIndex(problem.obsSS, jO);
							// construct the next type: nature takes the new
							// true state; each agent moves from its normal
							// type slot to the (type, observation) slot.
							int[] nextJT = new int[dmdp.numAgents];
							nextJT[0] = nextTrueState;
							for(int i=0; i<problem.numAgents; i++) {
								nextJT[i+1] = jStartT[i+1] + jO[i] + 1;
							}
							int nextJTi = JointIterator.getJointIndex(dmdp.typeSS, nextJT);
							// find out the probability of this transition and observation happening
							double nextTProb = problem.getTransProb(jStartT[0], jABasei, nextTrueState) *
											   problem.getObsProb(jABasei, nextTrueState, jOi);
							// each (state, observation) pair yields a distinct
							// next joint type, so no entry is written twice
							assert transitionFunction[jStartTi][jAi].get(nextJTi) == 0.0;
							transitionFunction[jStartTi][jAi].add(nextJTi, nextTProb);
						}
					}
				}
				break;
			case 1:
				// Contraction stage: deterministic. Nature keeps its type;
				// each agent's action (mod its type budget) selects the
				// normal type its (type, observation) pair collapses to.
				for(int[] jA : new CartesianIterator(dmdp.actionSS)) {
					int jAi = JointIterator.getJointIndex(dmdp.actionSS, jA);
					int[] nextJT = new int[dmdp.numAgents];
					nextJT[0] = jStartT[0];
					for(int i=0; i<problem.numAgents; i++) {
						nextJT[i+1] = (jA[i+1]%this.numTypesPerPlayer[i]) * (problem.obsSS[i]+1);
					}
					int nextJTi = JointIterator.getJointIndex(dmdp.typeSS, nextJT);
					transitionFunction[jStartTi][jAi].set(nextJTi, 1.0);
				}
				break;
			default: // invalid type, leave all transitions 0
				break;
			}

		}
		// construct reward function (same as base reward function operating on nature's type)
		double[][] rewardFunction = new double[dmdp.numJTypes][dmdp.numJActions];
		for(int[] jT : new CartesianIterator(dmdp.typeSS)) {
			if(getJBeliefStage(jT) != 0) // only give rewards for the normal stage
				continue;
			int jTi = JointIterator.getJointIndex(dmdp.typeSS, jT);
			for(int[] jA : new CartesianIterator(dmdp.actionSS)) {
				int jAi = JointIterator.getJointIndex(dmdp.actionSS, jA);
				int[] jARegularized = Arrays.copyOfRange(jA, 1, jA.length);
				makeRegularExpansionJA(jARegularized);
				int jABasei = JointIterator.getJointIndex(problem.actionSS, jARegularized);
				rewardFunction[jTi][jAi] = problem.getReward(jT[0], jABasei);
			}
		}
		// compute starting distribution (all agents start at type 0;
		// nature's type follows the base problem's start distribution)
		double[] startDistr = new double[dmdp.numJTypes];
		for(int s = 0; s < problem.numStates; s++) {
			int[] jT = new int[dmdp.numAgents];
			jT[0] = s;
			int jTi = JointIterator.getJointIndex(dmdp.typeSS, jT);
			startDistr[jTi] = problem.startDistr[s];
		}
		dmdp.setFunctions(transitionFunction, rewardFunction, startDistr);
	}

	/**
	 * Classifies a joint type by stage.
	 *
	 * @return -1 for invalid (agents in inconsistent stages),
	 *          0 for normal,
	 *          1 for belief-contraction
	 */
	private int getJBeliefStage(int[] jT) {
		if(isValidBeliefs(jT)) {
			// Agent 1's offset within its type block gives the stage;
			// validity guarantees all agents agree with it.
			if(jT[1] % (problem.obsSS[0]+1) == 0)
				return 0;
			else
				return 1;
		} 
		return -1;
	}

	/**
	 * A joint type is valid iff every agent is in the same stage — either all
	 * at a normal type (block-aligned index) or all at an intermediate one.
	 */
	private boolean isValidBeliefs(int[] jT) {
		boolean isFirstNormal = (jT[1] % (problem.obsSS[0]+1) == 0);
		for(int i=0; i<problem.numAgents; i++) {
			if((jT[i+1] % (problem.obsSS[i]+1) == 0) != isFirstNormal)
				return false;
		}
		return true;
	}

	/**
	 * Folds padded "noop" action indices back into the base action range,
	 * in place (expects a joint action WITHOUT nature's slot).
	 */
	private void makeRegularExpansionJA(int[] jA) {
		for(int i=0; i<problem.numAgents; i++) {
			jA[i] %= problem.actionSS[i];
		}
	}
	
	/**
	 * Canonicalizes a belief by reordering each agent's types in descending
	 * order of marginal probability, so beliefs that are equal up to a
	 * per-agent type permutation map to the same representative.
	 */
	public Belief normalizeBelief(Belief b) {
		// Marginal probability of each type, per real agent.
		double[][] agentTypeProbs = new double[dmdp.numAgents-1][]; // not including nature
		for(int a=1; a<dmdp.numAgents; a++) { // skip nature
			agentTypeProbs[a-1] = new double[dmdp.typeSS[a]];
		}
		for(int jTi=0; jTi<dmdp.numJTypes; jTi++) {
			int[] jT = JointIterator.getJoint(dmdp.typeSS, jTi);
			for(int a=1; a<dmdp.numAgents; a++) { // skip nature
				agentTypeProbs[a-1][jT[a]] += b.getProb(jTi);
			}
		}
		// sort types by descending marginal, then greedily swap each type
		// into its destination position
		Belief newB = b;
		for(int a=1; a<dmdp.numAgents; a++) { // skip nature
			ArrayList<PairComparable<Double, Integer>> probPosList = new ArrayList<PairComparable<Double,Integer>>();
			for(int t=0; t<dmdp.typeSS[a]; t++) {
				probPosList.add(new PairComparable<Double, Integer>(agentTypeProbs[a-1][t], t));
			}
			Collections.sort(probPosList, Collections.reverseOrder());
			for(int t=0, desPos=0; desPos<dmdp.typeSS[a]; t++, desPos++) {
				Belief swapB = newB.swapTypes(dmdp.typeSS, a, probPosList.get(t).obj2, desPos,
											  dmdp.validTypeList);
				if(swapB != null) {
					newB = swapB;
				} else {
					// swap rejected: keep the same candidate type but move
					// on to the next destination position
					t--;
				}
			}
		}
		assert newB.validate(dmdp.validTypeList);
		return newB;
	}
	
	/**
	 * Returns a {@link BeliefGenerator} that samples belief traces of length
	 * 1..depth under {@code samplePolicy} starting from {@code startBelief},
	 * adding (soft-)new beliefs to the set until {@code numBToAdd} have been
	 * added or {@link #MAX_SAMPLE_ATTEMPTS} sampling rounds have elapsed.
	 */
	public BeliefGenerator makeNormalizedBG(final Belief startBelief, final DecMDPPolicy samplePolicy,
											final int depth, final int numBToAdd) {
		class NormalizedBG implements BeliefGenerator {
			@Override
			public void addBeliefs(Set<Belief> bSet) {
				int bNeeded = bSet.size() + numBToAdd;
				int numAttempts = 0;
				while(bSet.size() < bNeeded && numAttempts++ < MAX_SAMPLE_ATTEMPTS) {
					for(int d=0; d<depth; d++) {
						ArrayList<Belief> unNormalizedTrace =
							dmdp.sampleBeliefTrace(startBelief, samplePolicy, d+1, null);
						Belief b = unNormalizedTrace.get(d);
						// only add beliefs that are not already (softly) present
						if(!Belief.softContains(bSet, b)) {
							bSet.add(b);
						}
					}
				}
			}
		}
		return new NormalizedBG();
	}

	/**
	 * Smoke test: builds the approximation for the multi-agent tiger problem,
	 * prints the joint-type enumeration, and runs point-based value iteration.
	 */
	public static void main(String[] args) {
		DecPOMDP tigerProb = DecPOMDPFactory.makeMATiger();
		PPChoiceApprox theApprox = new PPChoiceApprox(tigerProb, new int[]{3, 3});
		for(int[] jT : new CartesianIterator(theApprox.dmdp.typeSS)) {
			int jTi = JointIterator.getJointIndex(theApprox.dmdp.typeSS, jT);
			System.out.println("State " + jTi + " is: " +
							    Arrays.toString(JointIterator.getObjects(theApprox.dmdp.types, jT)));
		}
		Belief startB = new Belief(theApprox.dmdp.getStartDist());
		ArrayList<ValueVector> initialVVs = new ArrayList<ValueVector>();
		initialVVs.add(new ValueVector(-40.0, startB, theApprox.dmdp));
		DMDPValueIteration bvf = new DMDPValueIteration(theApprox.dmdp, initialVVs);
		BeliefGenerator bg = theApprox.makeNormalizedBG(startB,
				bvf.getEpsilonGreedyPolicy(0.9, theApprox.dmdp.getSymRandomPolicy()), 36, 100);

		bvf.runVI(startB, 3000, 0.01, bg);
	}
}
