package edu.asu.sapa;

import java.io.FileInputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;

import edu.asu.sapa.ground.Problem;
import edu.asu.sapa.ground.Operator;
import edu.asu.sapa.ground.PropDB;
import edu.asu.sapa.ground.State;
import edu.asu.sapa.lifted.Domain;
import edu.asu.sapa.lifted.LiftedProblem;
import edu.asu.sapa.parsing.PDDL21Parser;
import edu.asu.sapa.ground.update.Context;
import edu.asu.sapa.heuristic.PG;

//Generated:1022901	Explored:77131	Pruned:500000	Unique:666214	Open:343378
//Generated:2429639	Explored:221009	Pruned:1290000	Unique:1602337	Open:716716	F:9.42	G:23.00	H:10.00	GC:18198.26	HC:3588.58	HR:0.00	Q_U:73811.28	Q_F:5.51	BestU:73673.67	MaxR:98075.48	U:0.00
//Generated:3160712	Explored:214562	Pruned:1705000	Unique:1185913	Open:761845	F:12.30	G:25.00	H:9.00	GC:24880.56	HC:3447.63	HR:98075.49	Q_U:70709.01	Q_F:9.88	BestU:72826.26	MaxR:98075.48	U:69747.30


/**
 * Planner.java: The main class to actually do the search will use the main DS
 * and utility functions defined in other classes.
 */
public class Planner {

	//public static final float EPSILON = 0.01f;
	//public static final float EPSILON = 0.0625f;
	//public static final float EPSILON = 1.0f / (1 << 5);
	// Minimum time offset used to separate actions scheduled at the same
	// instant (see renderAction).
	public static final float EPSILON = 1.0f / (1 << 7);  
	//public static final float EPSILON = 0.0078125f;  // same as above
	//public static final float EPSILON = 0.5f;

	// Problem representations shared across the program (set in initialize/main).
	public static LiftedProblem liftedProblem;
	public static Problem problem;
	// Program start time in millis; used for the parsing/grounding timing report.
	public static long timeX;

	// Open list (priority queue of search states keyed by f-value).
	public StateQ queue = new StateQ(65536);
	//public StateQ queue = new StateQ(1<<12);
	//public StateQ queue = new StateQ(256);


	// Bi-level planning graph used for cost propagation and the relaxed-plan heuristic.
	private PG hardPG = new PG();
	
	// Search/heuristic configuration flags, mostly set from the command line
	// (see readOptions) and consumed by PG.optionSetting(this).
	public boolean relaxedPlanHeuristic = true;
	public int costPropOption = PG.CostPropCombo;
	public int lookaheadOption = -1;
	public int goalCostOption = 1;
	public boolean haFlag = false;
	public boolean haneFlag = false;

	public boolean res_adj = true;
	// public boolean PSP = false;
	
	// Best (lowest) heuristic value and its g seen so far; used only for
	// progress reporting in qualityPrune.
	public int bestH = Integer.MAX_VALUE;
	private int bestG = Integer.MAX_VALUE;

	// Upper bound on achievable reward (from Problem.initialize).
	private float maxReward = Float.POSITIVE_INFINITY;
	// Benefit and makespan of the best solution found so far; drive pruning.
	private float bestBenefit = Float.NEGATIVE_INFINITY;
	//private float bestBenefit = 88000;
	private float bestMakespan = Float.POSITIVE_INFINITY;
	
	public boolean autoFlag = false;
	public boolean qualityFlag = true;

	private String outfileName = new String("");

	// When true, useRP() tries to execute relaxed-plan actions directly.
	public boolean executeRP = false;

	// Weights of the f-value components (f = G_WEIGHT*g + H_WEIGHT*h - C_WEIGHT*benefit-term).
	private float G_WEIGHT = 1;
	private float H_WEIGHT = 2f; // try 2 -- cute: "A bird in hand is worth two in the bush."
	private float C_WEIGHT = 1; // try 1 // see initSearch()
	private float PRUNE_HCWEIGHT = 0.0f; //admissible
	//private float PRUNE_UWEIGHT = 0.0000f;

	// f-value of the most recently evaluated state (also shown in diagnostics).
	float fValue;
	// Benefit estimate of the initial state; normalizes later f-values (see evaluate).
	float initEstimate;
	
	//private State sp;

	// Duplicate-detection table: canonical state -> itself (see worseDuplicate).
	HashMap<State,State> duplicates = new HashMap<State,State>(queue.table.length);
	
	// Search statistics, reported by diagnostic().
	private int generatedStates = 0;
	private int exploredStates = 0;
	private int goalStates = 0;
	private int prunedStates = 0;


	private long time1, time2;

	private Date aDate;

	public boolean goalSelect = false;

	// due to unfortunate Java things, we must poll this variable to see if it
	// is time to be interrupted
	public volatile boolean interrupted = false;

	/**
	 * Evaluates the initial search state: propagates costs through the
	 * planning graph, computes the relaxed-plan heuristic, records the
	 * initial benefit estimate (initEstimate, used later by evaluate() to
	 * normalize f-values), and enqueues the state with f = 0.
	 *
	 * @param tempSP the initial state (problem.init)
	 * @return false if the state was pruned (cost propagation failed,
	 *         quality-pruned, or a worse duplicate); true if it was enqueued
	 */
	private final boolean initEvaluate(State tempSP) {
		
		if (!hardPG.costPropagation(tempSP)) {
			prune(tempSP,Float.NaN);
			return false;
		}

		
		int heuristicValue = hardPG.relaxedPlanHeuristic();

		// if (res_adj && relaxedPlanOption) {
		// float resadjValue = util.resourceAdjustment(rmtpg.getRelaxedPlan(),
		// rmtpg.getRPSize(), tempSP.fluentDB, false);
		// heuristicValue += resadjValue;
		// }
		tempSP.h = heuristicValue;
		tempSP.hCost = hardPG.hCost;
		tempSP.hReward = hardPG.hReward;

		// Net benefit estimate of the root; evaluate() divides by this value.
		initEstimate = tempSP.hReward - tempSP.hCost - tempSP.gCost;

//		if (bestBenefit <= Float.NEGATIVE_INFINITY)
			fValue = 0;
//		else
//			fValue = G_WEIGHT * (tempSP.g) + H_WEIGHT * (heuristicValue) - C_WEIGHT*(estimate-bestBenefit) / (initEstimate-bestBenefit);
			
		//		fValue = G_WEIGHT * (tempSP.gCost) + H_WEIGHT * (tempSP.hCost - tempSP.hReward);
		//		fValue += (tempSP.g + tempSP.h) / (init.g + init.h);

		if (qualityPrune(tempSP,initEstimate))
			return false;
		
		if (worseDuplicate(tempSP)) {
			prune(tempSP,0);
			return false;
		}
		

		queue.add(tempSP,fValue);

		// Pre-compute the applicable operators so expansion does not need the PG.
		Operator[] children = hardPG.applicableOperators();
		tempSP.setPotentialOperators(children);

		if (tempSP.time < Float.POSITIVE_INFINITY && executeRP) {
			tempSP.rp = hardPG.getRelaxedPlan();
		}
		
		return true;
	}
	
	/**
	 * Evaluates a generated state and, unless it is pruned, enqueues it.
	 * States with time == +infinity (all events exhausted; treated as goal
	 * candidates by getNextSolution) get h = 0 and are scored on g-cost only.
	 * All other states get cost propagation plus the relaxed-plan heuristic.
	 *
	 * @param tempSP the state to evaluate
	 * @return true if the state was added to the queue, false if pruned
	 */
	private final boolean evaluate(State tempSP) {

		if (tempSP.time >= Float.POSITIVE_INFINITY) {
			tempSP.h = 0;
			tempSP.hCost = 0;
			tempSP.hReward = 0;
			float estimate = 0 - tempSP.gCost;
			
			// Prune terminal states that cannot beat the incumbent solution
			// (lower benefit, or equal benefit without a shorter makespan).
			if (estimate < bestBenefit || (estimate == bestBenefit && makespanInternal(tempSP) >= bestMakespan)) {
				prune(tempSP,estimate);
				return false;
			}
			
			fValue = G_WEIGHT * (tempSP.g) - C_WEIGHT*(estimate) / initEstimate;
			queue.add(tempSP,fValue);
			
			return true;
		}

		
		if (!hardPG.costPropagation(tempSP)) {
			prune(tempSP,Float.NaN);
			return false;
		}

		
		int heuristicValue = hardPG.relaxedPlanHeuristic();

		// if (res_adj && relaxedPlanOption) {
		// float resadjValue = util.resourceAdjustment(rmtpg.getRelaxedPlan(),
		// rmtpg.getRPSize(), tempSP.fluentDB, false);
		// heuristicValue += resadjValue;
		// }
		tempSP.h = heuristicValue;
		tempSP.hCost = hardPG.hCost;
		tempSP.hReward = hardPG.hReward;

		float estimate = tempSP.hReward - tempSP.hCost - tempSP.gCost;

		if (qualityPrune(tempSP,estimate))
			return false;
		
		if (worseDuplicate(tempSP)) {
			prune(tempSP,0);
			return false;
		}
		
//		if (bestBenefit <= Float.NEGATIVE_INFINITY)
		// f mixes step count (g), heuristic distance (h), and the benefit
		// estimate normalized by the root's estimate (see initEvaluate).
		fValue = G_WEIGHT * (tempSP.g) + H_WEIGHT * (heuristicValue) - C_WEIGHT*(estimate) / initEstimate;
//	else
//		fValue = G_WEIGHT * (tempSP.g) + H_WEIGHT * (heuristicValue) - C_WEIGHT*(estimate-bestBenefit) / (initEstimate-bestBenefit);
		
	//		fValue = G_WEIGHT * (tempSP.gCost) + H_WEIGHT * (tempSP.hCost - tempSP.hReward);
	//		fValue += (tempSP.g + tempSP.h) / (init.g + init.h);

//		if (heuristicValue >= tempSP.parent.h)
//			fValue += 2*this.H_WEIGHT;
			//fValue += 100;

		queue.add(tempSP,fValue);

		Operator[] children = hardPG.applicableOperators();
		tempSP.setPotentialOperators(children);

		if (tempSP.time < Float.POSITIVE_INFINITY && executeRP) {
			tempSP.rp = hardPG.getRelaxedPlan();
		}
		
		return true;
	}

	/**
	 * Runs the best-first search until the next (improving) solution state is
	 * found, the queue empties, or {@link #interrupted} is set.
	 *
	 * @return the goal state of the next solution that improves on the best
	 *         one so far, or null if the queue is exhausted or the search
	 *         was interrupted
	 */
	synchronized public State getNextSolution() {
		int j;
		State tempSP;
		State sp;
		Operator[] potentialOperators;

		StateQ.Wrapper wrapper;
		Context c = new Context();

		do {
			// Inner loop: pop states until we find one worth expanding
			// (skipping stale duplicates and handling goal candidates).
			do {
				wrapper = queue.poll();
				if (wrapper == null)
					return null;
				sp = wrapper.event;
				
				if (worseDuplicate(sp)) {
					prune(sp,0);
					continue;
				}

				// time == +infinity marks a terminal/goal candidate (see evaluate).
				if (sp.time == Float.POSITIVE_INFINITY) {
					goalStates++;
					if (processSolution(sp)) {
						return sp;
					}
					continue;
				} 
				
				// all advance time children pre-generated
//				tempSP = new State(sp);
//				tempSP.parent = sp;
//				++generatedState;
//				
//				if (tempSP.advance())
//					evaluate(tempSP);
				break;
			} while(true);


			// Optionally try to execute the state's relaxed plan first.
			if (executeRP)
				useRP(sp,sp.rp,sp.rp.length);
			
			// advance-time child
			tempSP = new State(sp);
			tempSP.parent = sp;
			++generatedStates;
				
			// investigate those infinities, triple check CostFunction.addCost()
			if (tempSP.advance()) {
				evaluate(tempSP);
			} else {
				prune(tempSP,-3);
			}
				
			potentialOperators = sp.getPotentialOperators();

			// Expand: one child per applicable operator (pre-computed by evaluate()).
			for (Operator a : potentialOperators) {

				a.makeContext(sp,c);
				// completely take over with PG
				//if (!a.applicable(sp,c))
				//	continue;

				tempSP = sp.child(a,c);
				++generatedStates;
				
				if (tempSP == null) {
					prune(sp,-1);
					continue;
				} else {
					// The context is now owned by the child; allocate a fresh one.
					c = new Context();
				}
				
				evaluate(tempSP);

			}

			exploredStates++;
			// Release per-state expansion data to reclaim memory; the state
			// itself stays reachable through parent links for plan extraction.
			sp.constraints = null;
			sp.constraintsSize = 0;
			sp.potentialOperators = null;
			sp.rp = null;
			//sp.propDB = null;
			//sp.fluentDB = null;
			sp.events.table = null;
			sp.events.size = 0;

		} while(!interrupted);

		return null;
	}

	/**
	 * Walks the parent chain to determine whether any ancestor of
	 * {@code node} has been marked dead (operator == -1). The dead mark is
	 * propagated down the chain as the recursion unwinds, and parent links
	 * are cleared so the abandoned states can be garbage collected.
	 */
	private boolean deadAncestor(State node) {
		if (node == null)
			return false;
		if (node.operator == -1)
			return true;
		if (!deadAncestor(node.parent))
			return false;
		// Some ancestor is dead: mark this state dead too and drop the link.
		node.operator = -1;
		node.parent = null;
		return true;
	}
	
	/**
	 * Returns true when {@code candidate} should be discarded: it descends
	 * from a dead state, or it is a quiescent duplicate that does not improve
	 * on the stored copy. An improving duplicate (cheaper gCost, or equal
	 * gCost with an earlier time) replaces the stored state, which is then
	 * marked dead.
	 */
	private boolean worseDuplicate(State candidate) {
		if (deadAncestor(candidate))
			return true;
		if (!candidate.quiescent())
			return false;
		State known = duplicates.get(candidate);
		if (known == candidate)
			return false;
		if (known == null) {
			duplicates.put(candidate,candidate);
			return false;
		}
		boolean improves = candidate.gCost < known.gCost
				|| (candidate.gCost == known.gCost && candidate.time < known.time);
		if (!improves)
			return true;
		// Supersede the stored state and reclaim its chain.
		known.operator = -1;
		known.parent = null; // recover memory
		duplicates.put(candidate,candidate);
		return false;
	}
	
	// consider getting rid of this function and replace it's use by worseDuplicate inside of useRP
	/**
	 * Mirror image of worseDuplicate for useRP(): returns true when
	 * {@code candidate} is worth keeping — it is non-quiescent, new, or a
	 * strict improvement over the stored duplicate. Returns false for dead
	 * descendants and non-improving duplicates.
	 */
	private boolean betterDuplicate(State candidate) {
		if (deadAncestor(candidate))
			return false;
		if (!candidate.quiescent())
			return true;
		State known = duplicates.get(candidate);
		if (known == candidate)
			return false;
		if (known == null) {
			duplicates.put(candidate,candidate);
			return true;
		}
		boolean improves = candidate.gCost < known.gCost
				|| (candidate.gCost == known.gCost && candidate.time < known.time);
		if (!improves)
			return false;
		// Supersede the stored state and reclaim its chain.
		known.operator = -1;
		known.parent = null; // recover memory
		duplicates.put(candidate,candidate);
		return true;
	}

	/**
	 * Installs a (new) grounded problem and restarts the search from its
	 * initial state.
	 */
	synchronized public void setGrounding(Problem g) {
		problem = g;
		initSearch();
	}

	// Scratch buffer of intermediate states produced while executing a
	// relaxed plan (see useRP); grown on demand, indexed by nIntermediates.
	State[] intermediates = new State[16];
	int nIntermediates=0;
	// When true, useRP enqueues every intermediate state it builds; when
	// false, only the final state of the executed prefix is evaluated.
	boolean RPIntermediates = true;
	
	// TODO
	// FIXME
	//need to sort RP by operator ids also, so that we can generate parallel sets of actions in the right order, at the right times
	// (otherwise duplicate detection will render void the result of not doing this, which will be to delay the operator, and thus the makespan)
	/**
	 * Tries to execute as many actions of the relaxed plan as possible from
	 * state {@code sp}, advancing time when an action is not yet applicable.
	 * Intermediate states are either all collected and evaluated
	 * (RPIntermediates == true) or only the final state is evaluated.
	 * NOTE: the relaxed plan is walked from index rpSize-1 down to 0.
	 *
	 * @param sp          the state to execute the relaxed plan from
	 * @param relaxedPlan operator ids of the relaxed plan
	 * @param rpSize      number of valid entries in relaxedPlan
	 * @return the last state reached, or null when RPIntermediates is on and
	 *         no new intermediate state was produced
	 */
	final private State useRP(State sp, int[] relaxedPlan, int rpSize) {
		Operator a;
		State newSP, tempSP;
		int i;
		float next;
		float t;
		Context c;
		
		int count = 0;
		int s = nIntermediates;

		tempSP = sp;
		c=new Context();
		next = tempSP.getNextTime();
		for(i=rpSize-1; i>=0; --i) {
			a = Problem.operators.get(relaxedPlan[i]);
			a.makeContext(tempSP,c);
			
			if (!a.applicable(tempSP,c)) {
				// Not applicable yet: advance time until it becomes
				// applicable or no further advance is possible.
				if (next >= Float.POSITIVE_INFINITY)
					continue;
				
				newSP = new State(tempSP);
				newSP.parent = tempSP;
				++generatedStates;
				
				boolean possible;
				boolean applicable;
				do {
					possible = newSP.advance();
					a.makeContext(newSP,c);
					applicable = a.applicable(newSP,c);
					t = newSP.getNextTime();
				} while (t < Float.POSITIVE_INFINITY && !applicable && possible);
				
				if (!possible) {
					prune(newSP,-4);
					continue;
				}
				
				if (RPIntermediates) {
					if (betterDuplicate(newSP)) {
						// Record the advanced state; grow the buffer by 1.5x if full.
						if (nIntermediates >= intermediates.length) {
							State[] n = new State[nIntermediates + (nIntermediates>>1)];
							System.arraycopy(intermediates,0,n,0,nIntermediates);
							intermediates = n;
						}
						intermediates[nIntermediates++] = newSP;
						count++;
					} else {
						prune(newSP,-5);
						// don't continue the loop -- maybe the RP will take us somewhere new.
						// (think of prefix-overlapping RPs)
						// but ``prune'' the state because it doesn't end up in the queue
					}
				} else {
					if (tempSP != sp)
						prune(tempSP,-6);
					// see above; state doesn't enter the queue
				}
				
				tempSP=newSP;
				next = t;
				
				if (!applicable)
					continue;
				
				a.makeContext(tempSP,c);
			}
			
			// Apply the (now applicable) relaxed-plan action.
			newSP = tempSP.child(a,c);
			++generatedStates;
			if (newSP != null) {
				if(RPIntermediates) {
					if (betterDuplicate(newSP)) {
						if (nIntermediates >= intermediates.length) {
							State[] n = new State[nIntermediates + (nIntermediates>>1)];
							System.arraycopy(intermediates,0,n,0,nIntermediates);
							intermediates = n;
						}
						intermediates[nIntermediates++] = newSP;
						count++;
					} else {
						prune(newSP,-5);
					}
				} else {
					if (tempSP != sp)
						prune(tempSP,-6);
				}
				
				tempSP = newSP;
				next = newSP.getNextTime();
				
				// Context is now owned by the new state; allocate a fresh one.
				c = new Context();
			} else {
				prune(tempSP,-2);
			}
		}
		
		if (RPIntermediates) {
			if (count <= 0)
				return null;
	
			// TODO: re-introduce biasing in favor of advancing time
			// TODO: investigate infinities in debugging output of prune
			// pro: populate duplicate checking
			// con: pollutes the q with lots of options
			// pro: higher chance of obtaining an execution of a good subset of a RP 
			// note: having done duplicateCheck(newSP) there is an obligation to eventually evaluate newSP, that is, 
			// to give it a shot at the queue
			
			//executeRP = false;
			// Evaluate only the intermediates added by this call ([s, sz)).
			int sz = nIntermediates;
			for(i=s; i < sz; ++i) {
				evaluate(intermediates[i]);
			}
			nIntermediates -= count;
			//executeRP = true;
		} else if (tempSP != sp) {
			evaluate(tempSP);
		}

		return tempSP;
	}

	/** Returns the upper bound on achievable reward (set during initialize()). */
	public float getMaxReward() {
		return maxReward;
	}

	/**
	 * Tests whether two states carry equal proposition databases.
	 *
	 * @param s1 first state
	 * @param s2 second state
	 * @return true iff the states' PropDBs compare equal
	 */
	public boolean haveSamePreds(State s1, State s2) {
		// Return the comparison directly instead of if (...) return true; return false;
		return s1.getPropDB().equals(s2.getPropDB());
	}

	/**
	 * Re-grounds the current problem and rebuilds the planning graph,
	 * printing reachability statistics and grounding time.
	 * NOTE(review): largely mirrors the tail of initialize(); the two could
	 * share a helper.
	 */
	public void update() {

		timeX = (new Date()).getTime();
		problem.update();

		System.out.println(";;Reachable: Fluents: " + Problem.fluents.size()
				+ " Propositions: " + Problem.propositions.size() + " Operators: "
				+ Problem.operators.size());
		System.out.println(";;Dynamic: Fluents: " + problem.numReachableFluents
				+ " Propositions: " + problem.numReachableProps + " Actions: " + problem.numReachableOperators);

		/*
		 * Finish initialization. Do some pre-processing.
		 */
		aDate = new Date();
		time1 = aDate.getTime();

		System.out.println(";;Parsing & grounding: " + (time1 - timeX)
				+ " milliseconds.");

		// Pass the Goals & Ground Actions information to the Utility class

		// need to initialize here for the selection of goals 
		// (before initializing the search)
		hardPG.buildBiLevelGraph(Problem.operators, Problem.propositions);		
	}


	/**
	 * Grounds the lifted problem, prints reachability statistics and
	 * grounding time, configures the planning-graph options from this
	 * planner's flags, and builds the bi-level planning graph.
	 *
	 * @param prob the parsed lifted problem
	 */
	public void initialize(LiftedProblem prob) {
		liftedProblem = prob;


		problem = new Problem(true);
		// initialize() returns the maximum achievable reward.
		maxReward = problem.initialize(prob);

		System.out.println(";;Reachable: Fluents: " + Problem.fluents.size()
				+ " Propositions: " + Problem.propositions.size() + " Operators: "
				+ Problem.operators.size());
		System.out.println(";;Dynamic: Fluents: " + problem.numReachableFluents
				+ " Propositions: " + problem.numReachableProps + " Actions: " + problem.numReachableOperators);

		/*
		 * Finish initialization. Do some pre-processing.
		 */
		aDate = new Date();
		time1 = aDate.getTime();

		System.out.println(";;Parsing & grounding: " + (time1 - timeX)
				+ " milliseconds.");

		// Pass the Goals & Ground Actions information to the Utility class

		// need to initialize here for the selection of goals 
		// (before initializing the search)
		hardPG.optionSetting(this);
		hardPG.buildBiLevelGraph(Problem.operators, Problem.propositions);
	}

	/**
	 * Resets the search statistics, evaluates and enqueues the initial
	 * state, and derives C_WEIGHT from the initial state's g and h.
	 * NOTE(review): if initEvaluate fails, the search still proceeds with
	 * an empty queue (getNextSolution will simply return null) — confirm
	 * that is the intended behavior.
	 */
	public void initSearch() {

		System.out.println("\n;;<<< Start Searching for Solution >>>");

		generatedStates = 1;
		exploredStates = 0;
		prunedStates = 0;
		goalStates = 0;

		State sp = problem.init;
		if (!initEvaluate(sp)) {
			System.out.println("Problem unsolvable? (maybe?)");
		}
		
		C_WEIGHT = this.G_WEIGHT*sp.g + this.H_WEIGHT*sp.h; // ?? what should this be...??
	}

	/**
	 * Appends one plan line of the form "&lt;time&gt;: &lt;name&gt;[&lt;duration&gt;]" to
	 * {@code b}. The i-th action of a parallel set is offset by i * EPSILON
	 * so simultaneous actions print distinct, ordered start times.
	 */
	private void renderAction(StringBuilder b, float time, String name, float dur, int i) {
		float start = time + i * EPSILON;
		b.append(String.format("%.7f", start));
		b.append(": ").append(name).append('[').append(dur).append("]\n");
	}

	/**
	 * Prints the command-line usage text to stdout.
	 * NOTE(review): the flag names listed here (e.g. -cp, -la) use the short
	 * spellings, while readOptions() parses long "--" forms — confirm which
	 * set is current.
	 */
	private void printUsage() {
		System.out
		.println("Usage: java [Sapa-dir].Planner domain.pddl problem.pddl [option]\n"
				+ "Flags: -cp [NUMBER] -norp -gc -la [NUMBER] -noauto -quality -postProcess"
				+ " -ha -hane -noresadj -timelimit [NUMBER] -freq [NUMBER] -outfile [STRING]\n\n"

				// +"\t-gui Use the GUI\n"
				// + "\t-debug Print detailed problems, domains, actions
				// information\n"
				+ "\t-cp                Cost Propagation Option: 0-max; 1-sum (default); 2-Combo\n"
				+ "\t-norp              Turn off Relaxed Plan heuristic (-rp to turn on)\n"
				+ "\t-gc                GoalCost Aggregation Option: 0-max; 1-sum(default); 2-Combo\n"
				+ "\t-la                Lookahead option. Default: lookahead = -1\n"
				+ "\t-noauto            Turn off *auto* running option (-auto to turn on)"
				+ "\t-quality           Try to improve the quality after found first solution (with different options)\n"
				+ "\t-ha                Helpful actions (auto = false)\n"
				+ "\t-hane              Using negative effects of helpful actions (auto=false)\n"
				+ "\t-noresadj          Do not use the resource adjustment technique\n"
				// + "\t-dupa Check duplicate parameters in action
				// description\n"
				+ "\t-timelimit         Time cutoff in seconds (to stop the program)\n"
				+ "\t-freq              Frequency to check the time cutoff limit\n"
				+ "\t                       (e.g number of generated search nodes)\n"
				+ "\t-hw                Weight given to the h value (heuristic = g + hw*h)\n"
				+ "\t-outfile           Output file for the solution.\n"
				+ "\t-psp				Solving PSP problem\n"
				+ "\t-sapaps            Ignore goal dependencies in heuristic (act like SapaPS)\n"
				+ "\t-goalremove        Use SapaPS goal removal technique in heuristic\n"
				// + "\t-hmax Use hMax, admissible PSP heuristic\n"
				+ "\t-userp             Use (as many as possible) actions in the relaxed plan to move forward\n"
				+ "\t-norefinegoal      In PSP, don't use refine steps to remove Goals and actions when estimate heus\n"
				+ "\t-goalselect        Select Goals from the initial state's relaxed plan\n"
				// + "\t-sas Specify the SAS file associated with the
				// given domain\n"
				// + "\t-group Specify the SAS group file associated
				// with the given domain\n"
				+ "\t-outputcostfile    Output the costs file to test other planners like OptiPlan and AltAltPS\n"
				+ "\t-sgPSP             Static Greedy PSP search\n"
				// + "\t-usePlanGraph Use the planning graph to execute
				// RP (forces -userp, SAS and optimalpsp)\n"
				// + "\t-printNodeTime Periodically print the time spent
				// per node\n"
				// + "\t-bound Do not search past root node\n"
				+ "\t-pddl3             PDDL3 simple preferences mode (but still read domain as if PSP)\n"
				+ "\t-forceForward      For optimal search, try to improve the solution using RP\n"
				+ "\t                      (0 = always, 1 = only if improvement (less greedy))\n"
				// + "\t-cplex Use an ILOG CPlex solver (will cause an
				// error if none exists)\n"
				// + "\t-holdbad Bad nodes held with probability equal
				// to distance from current best (PSP)\n"
				+ "(Some) Default options: -cp 1 -la -1 -rp -userp -noauto -quality\n\n");
	}

	/**
	 * Accept a goal state only if it improves on the best solution found so far.
	 */

	private boolean processSolution(State sp) {
		float benefit = sp.benefit();
		// Negative-benefit plans are never accepted.
		if (benefit < 0)
			return false;
		float makespan = makespan(sp);
		if (benefit < bestBenefit)
			return false;
		if (benefit > bestBenefit) {
			// Strictly better utility: accept and reset the makespan record.
			bestBenefit = benefit;
			bestMakespan = makespan;
			return true;
		}
		// Equal utility: accept only on a strictly shorter makespan.
		if (makespan < bestMakespan) {
			bestMakespan = makespan;
			return true;
		}
		return false;
	}

	/**
	 * Reconstructs the plan from a goal state by following parent links.
	 * Index 0 holds a synthetic "done" context carrying the total cost and
	 * the plan makespan; the remaining entries are the executed action
	 * contexts in reverse chronological order.
	 */
	public ArrayList<Context> extractPlan(State sp) {
		ArrayList<Context> plan = new ArrayList<Context>();
		Context done = new Context();
		done.id = 0;
		done.cost = sp.gCost;
		plan.add(done);
		float makespan = 0f;
		for (State s = sp; s != null; s = s.parent) {
			if (s.c == null)
				continue;
			plan.add(s.c);
			if (s.c.finish > makespan)
				makespan = s.c.finish;
		}
		done.finish = makespan;
		done.start = makespan;
		done.duration = 0.0f;
		return plan;
	}
	
	/**
	 * Prints the search timing/statistics and the plan produced by
	 * extractPlan() (actions in chronological order, then the synthetic
	 * "(done)" marker), followed by the utility/actions/makespan summary.
	 *
	 * @param plan plan list as built by extractPlan(): index 0 is the
	 *             "done" context; indices 1..n-1 are actions, newest first
	 */
	public void printSolution(ArrayList<Context> plan) {
		Context done = plan.get(0);
		// done.cost holds the total plan cost; benefit is its negation here.
		float benefit = 0f-done.cost;
		float makespan = done.finish;
		
		aDate = new Date();
		time2 = aDate.getTime();
		System.out.println("\n;; Search time " + (time2 - time1) + " millisecs");
		//time1 = time2;
		System.out.println(";; State generated: " + generatedStates + "\tState explored: " + exploredStates);

		StringBuilder b = new StringBuilder();
		Context c;
		
		// Walk backwards so actions print in chronological order.
		for (int i = plan.size()-1; i > 0; --i){
			c = plan.get(i);
			int actID = c.id;
			Operator act = Problem.operators.get(actID);
			renderAction(b,c.start, act.getName(), c.duration, 0);
		}
		
		renderAction(b,done.start,"(done)",done.duration,0);

		//		System.out.println(";; Utility: " + benefit + "  Actions: "
		//				+ actions.size() + "  Makespan: " + String.format("%.2f", m));
		System.out.println(";; Utility: " + benefit + "  Actions: "
				+ (plan.size()-1) + "  Makespan: " + makespan);

		System.out.print(b.toString());

	}

	/**
	 * Returns the makespan of the plan ending at {@code sp}: the maximum
	 * finish time over all action contexts along the parent chain (0 if
	 * the chain contains no actions).
	 */
	public float makespan(State sp) {
		float max = 0f;
		for (State s = sp; s != null; s = s.parent) {
			if (s.c != null && s.c.finish > max)
				max = s.c.finish;
		}
		return max;
	}
	
	/**
	 * Cheap makespan proxy used during pruning: the parent's time stamp,
	 * or the state's own time when it has no parent.
	 */
	public float makespanInternal(State sp) {
		return (sp.parent == null) ? sp.time : sp.parent.time;
	}

	/**
	 * Counts a pruned state and emits a diagnostic line every 5000 prunes.
	 *
	 * @param sp       the pruned state (shown in the diagnostic)
	 * @param estimate the benefit estimate at prune time (NaN or a negative
	 *                 code when pruned for a non-benefit reason)
	 */
	final private void prune(State sp, float estimate) {
		if (++prunedStates % 5000 == 0)
			diagnostic(sp, estimate);
	}
	
	/**
	 * Prints a one-line progress report: node counters, open/unique sizes,
	 * the f/g/h components of {@code sp}, the priority and benefit estimate
	 * of the state at the head of the queue (queue.table[0], if any), and
	 * the incumbent solution's benefit.
	 */
	final private void diagnostic(State sp, float estimate) {
		StateQ.Wrapper t = queue.table[0];
		State s;
		float qf = Float.NaN;
		float qu = Float.NaN;
		if (t != null) {
			qf = t.priority;
			s = t.event;
			if (s != null)
				qu = s.hReward - s.gCost - s.hCost;
		}
		System.out.print("Generated:" + this.generatedStates + "\tPruned:" + prunedStates + "\tExplored:" + this.exploredStates + "\tGoals:" + this.goalStates +  
				"\tOpen:" + queue.size() + "\tUnique:" + duplicates.size());
		System.out.println("\tF:" + String.format("%.2f", fValue)
				+ "\tG:" + sp.g + "\tH:" + sp.h + "\tGC:" + String.format("%.2f",sp.gCost)
				+ "\tHC:" + String.format("%.2f",sp.hCost) + "\tHR:" + String.format("%.2f",sp.hReward) 
				+ "\tQ_U:" + String.format("%.2f",qu) + "\tQ_F:" + String.format("%.2f",qf) 
				+ "\tBestU:" + String.format("%.2f",bestBenefit) + "\tMaxR:" + String.format("%.2f",maxReward) + "\tU:" + String.format("%.2f",estimate));
	}
	
	/**
	 * Benefit-based pruning. Computes an optimistic net-benefit bound for
	 * {@code sp} (with PRUNE_HCWEIGHT == 0 the heuristic cost is ignored,
	 * keeping the bound admissible) and prunes states whose bound is
	 * negative or cannot beat the incumbent solution. As a side effect,
	 * reports search progress whenever (h, g) improves.
	 *
	 * @return true when the state was pruned
	 */
	final private boolean qualityPrune(State sp, float estimate) {
		float bound = sp.hReward - PRUNE_HCWEIGHT * sp.hCost - sp.gCost;

		if (bound < 0) {
			prune(sp, estimate);
			return true;
		}

		//float delta = (maxReward - bestBenefit) * PRUNE_UWEIGHT;
		//float cutoff = bestBenefit + delta;
		boolean cannotBeat = bound < bestBenefit
				|| (bound == bestBenefit && makespanInternal(sp) >= this.bestMakespan);
		if (cannotBeat) {
			prune(sp, estimate);
			return true;
		}

		// Progress report: strictly better h, or same h with a smaller g.
		int h = sp.h;
		if (h < bestH || (h == bestH && sp.g < bestG)) {
			bestH = h;
			bestG = sp.g;
			System.out.print("Advance to: (" + bestH + ", " + bestG + ")\t");
			diagnostic(sp, estimate);
		}

		return false;
	}


	/**
	 * Discards the open list and replaces it with a tiny fresh one.
	 * The explicit System.gc() hint encourages reclaiming the (potentially
	 * very large) old queue before the search continues.
	 */
	public void clearQueue() {
		queue = null;
		System.gc();
		queue = new StateQ(10);
		//		queue.clear();
	}

	/**
	 * Parses the command-line options (everything after the domain and
	 * problem file arguments, i.e. starting at index 2) and sets the
	 * corresponding planner flags. Unrecognized flags are reported and
	 * ignored.
	 *
	 * @param args the full command-line argument array
	 */
	public void readOptions(String args[]) {
		int i;

		for (i = 2; i < args.length; i++) {

			if (args[i].equalsIgnoreCase("--costPropagation")) {
				if (i + 1 >= args.length || args[i+1].startsWith("--")) {
					System.out.println("Need to specify a cost propagation method after '--costPropagation' flag. [max, sum, or combo]");
					continue;
				}
				
				++i;
				if (args[i].compareToIgnoreCase("max")==0)
					costPropOption=PG.CostPropMax;
				else if (args[i].compareToIgnoreCase("sum")==0)
					costPropOption=PG.CostPropSum;
				else if (args[i].compareToIgnoreCase("combo")==0)
					costPropOption=PG.CostPropCombo;
				else
					// BUG FIX: this warning used to print unconditionally,
					// even when a valid value was supplied.
					System.out.println("The only valid options for '--costPropagation' are 'max', 'sum', and 'combo'.");
				continue;
			}

			if (args[i].equalsIgnoreCase("--heuristic")) {
				if (i + 1 >= args.length || args[i+1].startsWith("--")) {
					System.out.println("Need to specify heuristic after '--heuristic' flag.");
					continue;
				}
				
				if (args[++i].compareToIgnoreCase("rp")==0)
					relaxedPlanHeuristic=true;
				else
					// BUG FIX: only warn when the supplied value is invalid.
					System.out.println("The only valid option for '--heuristic' is 'rp'.");
				continue;
			}


			if (args[i].equalsIgnoreCase("--goalselect")) {
				goalSelect = true;
				continue;
			}
			if (args[i].equalsIgnoreCase("--nogoalselect")) {
				goalSelect = false;
				continue;
			}

			if (args[i].equalsIgnoreCase("--lookahead")) {
				if (i + 1 >= args.length || args[i+1].startsWith("--")) {
					System.out
					.println("Need to specify lookahead value. Ignore -la flag.");
					continue;
				}

				try {
					// Use Integer.parseInt instead of the deprecated Integer(String) constructor.
					lookaheadOption = Integer.parseInt(args[++i]);
				} catch (NumberFormatException e) {
					lookaheadOption = 1;
					System.out
					.println("Goal costs option in INCORRECT format. Ignore -la flag.");
				}

				if ((lookaheadOption < -1) || (lookaheadOption > 2)) {
					System.out
					.println("Valid lookaheadOption = -1,0,1 or 2. Use default value");
					lookaheadOption = -1;
				}
				continue;
			}
			if (args[i].equalsIgnoreCase("--auto")) {
				autoFlag = true;
				continue;
			}
			if (args[i].equalsIgnoreCase("--noauto")) {
				autoFlag = false;
				continue;
			}
			if (args[i].equalsIgnoreCase("--quality")) {
				qualityFlag = true;
				continue;
			}
			if (args[i].equalsIgnoreCase("--noquality")) {
				qualityFlag = false;
				continue;
			}
			if (args[i].equalsIgnoreCase("--helpfulActions")) {
				haFlag = true;
				autoFlag = false;
				continue;
			}
			if (args[i].equalsIgnoreCase("--helpfulActions_NE")) {
				haneFlag = true;
				haFlag = true;
				autoFlag = false;
				continue;
			}
			if (args[i].equalsIgnoreCase("--resourceAdjustment")) {
				res_adj = true;
				continue;
			}
			if (args[i].equalsIgnoreCase("--noresourceAdjustment")) {
				res_adj = false;
				continue;
			}

			if (args[i].equalsIgnoreCase("--heuristicWeight")) {
				if (i + 1 >= args.length || args[i+1].startsWith("--")) {
					System.out
					.println("No heuristic weight specified after '--heuristicWeight' flag.");
					continue;
				}

				try {
					// Use Float.parseFloat instead of the deprecated Float(String) constructor.
					H_WEIGHT = Float.parseFloat(args[++i]);
				} catch (NumberFormatException e) {
					H_WEIGHT = 1;
					System.out.println("The value given after '--heuristicWeight' could not be interpreted as a number.");
				}
				continue;
			}
			if (args[i].equalsIgnoreCase("--outfile")) {
				if (i + 1 >= args.length || args[i+1].startsWith("--")) {
					System.out.println("No outfile name specified.");
					continue;
				}

				outfileName += args[++i];
				continue;
			}

			if (args[i].equalsIgnoreCase("--executeRP")) {
				/** use (as many as possible) actions in the RP to move forward */
				executeRP = true;
				continue;
			}
			if (args[i].equalsIgnoreCase("--noexecuteRP")) {
				/** use (as many as possible) actions in the RP to move forward */
				executeRP = false;
				continue;
			}

			if (args[i].equalsIgnoreCase("--pddl3")) {
				System.out.println(";; PDDL3 mode");
				continue;
			}

			System.out.println("Ignore incorrect flag: " + args[i]);
		}
	}

	
	/** ******* MAIN FUNCTION ********** */
	public static void main(String args[]) {
		// Instantiating the parser once initializes its static state; later
		// calls use the static ReInit/parse_* entry points.
		@SuppressWarnings("unused")
		PDDL21Parser parser21 = new PDDL21Parser(System.in);
		Planner sapa = new Planner();

		// args[0] = domain file, args[1] = problem file; the rest are flags.
		if (args.length < 2) {
			sapa.printUsage();
			System.exit(1);
		} 

		sapa.readOptions(args);

		LiftedProblem problem = PDDL21Parser.prob;

		FileInputStream pddl_file;

		/*
		 * Instantiate a parser
		 */
		Date d = new Date();
		timeX = d.getTime(); // Get the starting time of the program

		/** * Parse the Domain specification file *** */
		try {
			pddl_file = new java.io.FileInputStream(args[0]);
			PDDL21Parser.ReInit(pddl_file);
		} catch (java.io.FileNotFoundException e) {
			System.out.println("Domain file " + args[0] + " not found !!!");
			return;
		}

		try {
			PDDL21Parser.parse_domain_pddl();
			System.out.println(";;Domain " + ((Domain) problem).name
					+ " successfully parsed!" + " num actions = "
					+ problem.actions.count);
		} catch (edu.asu.sapa.parsing.ParseException e) {
			System.out.println("Exception while parsing domain "
					+ ((Domain) problem).name + "!");
			e.printStackTrace();
			return;
		}

		/** ** Parse the problem file *** */
		try {
			pddl_file = new java.io.FileInputStream(args[1]);
			PDDL21Parser.ReInit(pddl_file);
		} catch (java.io.FileNotFoundException e) {
			System.out.println("Problem file " + args[1] + " not found !!!");
			return;
		}

		try {
			PDDL21Parser.parse_problem_pddl();
			System.out.println(";;Problem " + problem.name
					+ " successfully parsed!" + " num objects = "
					+ problem.constants.count);
		} catch (edu.asu.sapa.parsing.ParseException e) {
			System.out.println("Exception while parsing problem "
					+ problem.name + "!");
			e.printStackTrace();
			return;
		}

		sapa.initialize(problem);

		//		goalSelection(sapa);

		sapa.initSearch();
		State goalState = null;
		ArrayList<Context> bestPlan = null;
		// Anytime loop: keep asking for better solutions until the queue
		// is exhausted (getNextSolution returns null).
		do {
			goalState = sapa.getNextSolution();
			if (goalState == null)
				break;
			bestPlan = sapa.extractPlan(goalState);
			sapa.printSolution(bestPlan);
			//			sapa.bestH = Float.POSITIVE_INFINITY;
			// if ((sapa.bestBenefit) / (sapa.maxReward - initCost) > 0.8) {
			// sapa.useRelaxedPlan = false;
			// System.out.println("\nVery close");
			// old = sapa.bestBenefit;
			// sapa.queue.clear();
			// sapa.queue.add(goalState, 0);
			// System.out.println("prune");
			// }
		} while (true);
		if (sapa.interrupted) {
			System.out.println("\nThis is the non-threaded version.");
		}
		if (bestPlan != null) {
			System.out.println("\nExhausted Queue. Best plan found:");
			sapa.printSolution(bestPlan);
		} else {
			System.out.println("\nExhausted Queue. No solutions found.");
		}
	}
	

}
