package edu.gatech.cc.liam.marl.decpomdp;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;

import edu.gatech.cc.liam.core.rl.models.CartesianIterator;
import edu.gatech.cc.liam.core.rl.models.JointIterator;
import edu.gatech.cc.liam.core.rl.models.decpomdp.DecPOMDP;

/**
 * Command-line driver for Dec-POMDP approximation experiments.
 *
 * <p>Parses a {@code .dpomdp} problem file, builds a {@link DecPOMDP} model from it, and runs
 * the point-based value-iteration experiment in {@link #testAprroxPOMDP(DecPOMDP)}.
 *
 * <p>Usage: {@code java Experiments [problemFile]} — when no argument is given, the
 * hard-coded default problem below is used.
 */
public class Experiments {

	/** Problem file used when no path is supplied on the command line. */
	private static final String DEFAULT_PROBLEM =
			"E:\\Users\\Liam\\workspaces\\workspaceShared\\dec-pomdp\\Problems\\recycling.dpomdp";

	// Other benchmark problems that have been run through this driver:
	//   dectiger.dpomdp, broadcastChannel.dpomdp, GridSmall.dpomdp,
	//   boxPushingUAI07.dpomdp, wirelessDelay.dpomdp, Mars.dpomdp, Grid3x3corners.dpomdp

	/**
	 * Entry point: parses the problem file and runs the approximation experiment.
	 *
	 * @param args optional; {@code args[0]} is the path to a {@code .dpomdp} problem file.
	 *             Defaults to {@link #DEFAULT_PROBLEM} when absent.
	 */
	public static void main(String[] args) {
		// Allow the problem file to be supplied on the command line; fall back to the
		// original hard-coded path so existing invocations behave identically.
		String filename = (args.length > 0) ? args[0] : DEFAULT_PROBLEM;

		try {
			System.out.println("Starting " + filename);
			PosgInputFileParser POSGReader = new PosgInputFileParser(filename);
			POSGReader.parse();
			DecPOMDP parsedDPDP = new DecPOMDP(POSGReader);
			testAprroxPOMDP(parsedDPDP);
		} catch (IOException e) {
			// FileNotFoundException is a subclass of IOException, so a single handler
			// covers both cases the original code caught separately (identically).
			System.err.println("Failed to read problem file: " + filename);
			e.printStackTrace();
		}
	}

	/**
	 * Runs point-based value iteration on a type-based approximation of the given Dec-POMDP.
	 *
	 * <p>Builds a {@code PPChoiceApprox} with {@code NUM_TYPES} types per agent (two agents),
	 * prints the joint type-state enumeration, seeds value iteration with a single pessimistic
	 * value vector at the start belief, and iterates until the value change falls below
	 * {@code EPS} or {@code MAX_B} beliefs have been processed.
	 *
	 * <p>NOTE(review): method name contains a typo ("Aprrox"); kept as-is because the method
	 * is public and may be referenced by external callers.
	 *
	 * @param prob the parsed Dec-POMDP problem to approximate and solve
	 */
	public static void testAprroxPOMDP(DecPOMDP prob) {
		final int MAX_B = 3000;       // maximum number of beliefs explored by VI
		final double EPS = 0.0005;    // convergence threshold for value iteration
		final int NUM_TYPES = 3;      // types per agent in the approximation
		System.out.println("MAX_B = " + MAX_B + "  EPS = " + EPS + "  NUM_TYPES = " + NUM_TYPES);
		PPChoiceApprox theApprox = new PPChoiceApprox(prob, new int[]{NUM_TYPES, NUM_TYPES});
		// Enumerate and print every joint type-state of the approximate model for reference.
		for(int[] jT : new CartesianIterator(theApprox.dmdp.typeSS)) {
			int jTi = JointIterator.getJointIndex(theApprox.dmdp.typeSS, jT);
			System.out.println("State " + jTi + " is: " +
							    Arrays.toString(JointIterator.getObjects(theApprox.dmdp.types, jT)));
		}
		Belief startB = new Belief(theApprox.dmdp.getStartDist());
		// Seed VI with a single pessimistic value vector (-2000) at the start belief.
		ArrayList<ValueVector> initialVVs = new ArrayList<ValueVector>();
		initialVVs.add(new ValueVector(-2000.0, startB, theApprox.dmdp));
		DMDPValueIteration bvf = new DMDPValueIteration(theApprox.dmdp, initialVVs);
		// Belief generator samples beliefs under a 0.9-epsilon-greedy policy
		// (depth 36, 100 samples) seeded from the random policy.
		BeliefGenerator bg = theApprox.makeNormalizedBG(startB,
				bvf.getEpsilonGreedyPolicy(0.9, theApprox.dmdp.getRandomPolicy()), 36, 100);
		bvf.runVI(startB, MAX_B, EPS, bg);
	}


//	public static void testDMDPVFApprox(int depth, int numIterations,
//			PPL0Approximator ppa, int numSamples, int simulationDepth) {
//		ArrayList<ValueVector> initialVVs = new ArrayList<ValueVector>();
//		Belief startB = new Belief(ppa.approxP.getStartDist());
//		initialVVs.add(new ValueVector(-20.0, startB, ppa.approxP));
//		DMDPValueIteration bvf = new DMDPValueIteration(ppa.approxP, initialVVs);
//		System.out.println(bvf);
//		Set<Belief> bSamples = new HashSet<Belief>(Arrays.asList(new Belief[]{startB}));
//		int samplingStart = 30;
//		for (int d = 0; d < depth; d++) {
//			bSamples.addAll(bvf.baseModel.sampleBeliefs(startB, ppa.approxP.getRandomPolicy(),
//														samplingStart, d + 1, false));
//			samplingStart *= 1.5;
//		}
//		System.out.println(bSamples.size() + " belief samples: ");// +
//																	// bSamples.toString());
//		for (int i = 1; i < numIterations; i++) {
//			System.out.println("************************\nIteration " + i);
//			 bvf.perseusBackup(bSamples);
//			System.out.println("\n" +  bvf);
//			 bvf.simulateBestPolicy(startB, depth);
//			if (i > 40) {
//				System.out.print("Testing utility on base problem.");
//				PPL0ApproxPolicy policy = new PPL0ApproxPolicy(ppa,  bvf);
//				double averageReward = 0.0;
//				for (int sr = 0; sr < numSamples; sr++) {
//					averageReward += ppa.problem.simulateTrace(new Belief(
//							ppa.problem.startDistr), policy, simulationDepth);
//				}
//				averageReward /= numSamples;
//				System.out.println("average utility: " + averageReward);
//			}
//			System.out.println("---end-" + i + "---");
//			// System.out.println(maTiger.getActionString(startB.supportVector.jTjADistrPolicy));
//		}
//		 bvf.simulateBestPolicy(startB, depth);
//	}

}
