package ipc;


import ipc.Node;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.LinkedList;
import java.util.List;
import java.util.TreeMap;
import java.util.Vector;

import rddl.ActionGenerator;
import rddl.EvalException;
import rddl.RDDL.LCONST;
import rddl.RDDL.PVARIABLE_DEF;
import rddl.RDDL.PVARIABLE_STATE_DEF;
import rddl.RDDL.PVAR_NAME;
import rddl.RDDL.PVAR_INST_DEF;

import java.util.Random;

/**
 * Enhanced UCT agent that can be run as normal UCT or a modified version of UCT
 * based on input parameters.
 */
/**
 * Enhanced UCT agent that can be run as normal UCT or a modified version of UCT
 * based on input parameters.
 *
 * <p>Action selection ({@link #selectAction}) builds a UCT tree under a wall-clock
 * budget; rollouts ({@link #simulateGame}) run a fixed number of random/no-op steps
 * and then, when a SPUDD value function is available, cut off with a value query.
 */
public final class TimedUCT<S extends State, A> extends Agent<S, A> {

    /** UCT constant; controls the balance between exploration and exploitation. */
    private double uctConstant_;

    /** Number of distinct UCT trees to build and evaluate. */
    private int nEnsembles_;

    /** Method used to combine the results of multiple trees. */
    private EnsembleMethod ensembleMethod_;

    /** Method used to play out rollouts below the tree. */
    private SimulationMethod simulationMethod_;

    // NOTE(review): _spudd and spudd_file are never read or written in this class;
    // kept in case something external relies on them -- confirm before removing.
    private myDD _spudd;
    private String spudd_file;

    /** Rollout depth before attempting a SPUDD value-function cutoff. */
    private int fHorizon;

    /** Seeded RNG so runs are reproducible for a given seed. */
    private Random _rand;

    /** Method for combining multiple trees. */
    public enum EnsembleMethod {
        ROOT_PARALLELIZATION,
        PLURALITY_VOTE
    }

    /** Method for quickly simulating an entire game. */
    public enum SimulationMethod {
        RANDOM,
        NOOP
    }

    /** Private copy of the environment simulator used to grow the tree. */
    private Simulator<S, A> simulator_ = null;

    /** Problem-instance name; keys the shared SPUDD value-function lookups. */
    private String inst_name;

    /** Total decision horizon of the problem instance. */
    private int horizon;

    /**
     * Create a traditional UCT agent.
     *
     * @param uctConstant controls balance between exploration and exploitation;
     *        must be non-negative.
     * @param simulator   environment simulator; a private copy is taken.
     * @param fHorizon    rollout depth before a SPUDD value cutoff is attempted.
     * @param seed        RNG seed for reproducible rollouts.
     * @param inst_name   problem-instance name used for SPUDD lookups.
     * @param hor         total decision horizon of the instance.
     * @throws IllegalArgumentException if {@code uctConstant < 0}.
     */
    public TimedUCT(double uctConstant, Simulator<S, A> simulator, int fHorizon,
            long seed, String inst_name, int hor) {
        this.horizon = hor;
        this.inst_name = inst_name;
        // Was a bare System.exit(1); throw instead, consistent with the
        // sparse-sample-size validation in the other constructor.
        if (uctConstant < 0) {
            throw new IllegalArgumentException(
                    "UCT constant must be non-negative: " + uctConstant);
        }
        this.fHorizon = fHorizon;
        name_ = "UCT";
        uctConstant_ = uctConstant;
        // -1 disables sparse sampling. (Field name typo "SMAPLE" is external to
        // this class and cannot be fixed here.)
        ActionNode.SPARSE_SMAPLE_SIZE = -1;
        nEnsembles_ = 1;
        ensembleMethod_ = EnsembleMethod.ROOT_PARALLELIZATION;
        simulationMethod_ = SimulationMethod.RANDOM;
        simulator_ = simulator.copy();
        _rand = new Random(seed);
        StateNode.inst_name = inst_name;
    }

    /**
     * Create a UCT agent with sparse sampling at action nodes.
     *
     * @param sparseSampleSize samples per action node; must be {@code > 0}, or
     *        {@code -1} to disable sparse sampling.
     * @throws IllegalArgumentException if {@code sparseSampleSize} is invalid.
     */
    public TimedUCT(double uctConstant, int sparseSampleSize,
            Simulator<S, A> simulator, int fHorizon, long seed, String inst_name, int hor) {
        this(uctConstant, simulator, fHorizon, seed, inst_name, hor);
        if (sparseSampleSize < 1 && sparseSampleSize != -1)
            throw new IllegalArgumentException("Sparse Sample Size > 0 or = -1");
        ActionNode.SPARSE_SMAPLE_SIZE = sparseSampleSize;
    }

    /**
     * Create an ensemble UCT agent.
     *
     * @param ensembleTrials number of independent trees to build; must be {@code >= 1}.
     * @param ensembleMethod name of an {@link EnsembleMethod} constant.
     * @throws IllegalArgumentException if {@code ensembleTrials < 1} or the
     *         method name does not match an {@link EnsembleMethod} constant.
     */
    public TimedUCT(double uctConstant, int sparseSampleSize,
            int ensembleTrials, String ensembleMethod, Simulator<S, A> simulator,
            int fHorizon, long seed, String inst_name, int hor) {
        this(uctConstant, sparseSampleSize, simulator, fHorizon, seed, inst_name, hor);
        // Was a bare System.exit(1); throw for consistency with the other validations.
        if (ensembleTrials < 1) {
            throw new IllegalArgumentException(
                    "Ensemble trials must be >= 1: " + ensembleTrials);
        }
        nEnsembles_ = ensembleTrials;
        ensembleMethod_ = EnsembleMethod.valueOf(ensembleMethod);
    }

    /**
     * Builds UCT trees and then selects the best action.
     * If the number of trajectories is less than the number of actions at the
     * root state then not all actions are explored at least one time. In this
     * situation the best action is selected from only those that have been
     * explored.
     *
     * @param state   current state to plan from.
     * @param timeout wall-clock budget in milliseconds.
     * @param time    current decision step (0-based).
     * @return the action of the max-valued child of the root node.
     */
    @Override
    public A selectAction(S state, long timeout, int time) {
        long tim = System.currentTimeMillis();
        simulator_.resetRewards();
        simulator_.setState(state);
        simulator_.setTime(time);

        Node<S, A> current = new StateNode<S, A>(state, simulator_.getLegalActions());

        // Spend up to 30% of the budget refreshing the SPUDD value function.
        SPUDDer.updateDD(inst_name, (long) (0.3d * timeout));

        int agentTurn = simulator_.getState().getAgentTurn();
        // Generate UCT trees and save root action values

        double tr = 0;
        int nsim = 0;
        // Remaining budget after the SPUDD update; stop when the remaining time
        // is smaller than the cost of the last simulation (so we never overrun).
        long timeoutcpy = timeout - (System.currentTimeMillis() - tim);
        tim = 0;
        while (timeoutcpy > tim) {

            tim = System.currentTimeMillis();
            if (simulator_ == null) {
                System.err.println("Simulator pulled from under my leg");
                System.exit(1);
            }

            Simulator<S, A> simcpy = simulator_.copy();
            double[] rewards = playSimulation(current, simcpy);

            simcpy.resetRewards();
            simcpy = null;
            tr += rewards[0];
            tim = (System.currentTimeMillis() - tim);
            timeoutcpy -= tim;
            ++nsim;
        }

        Main.activityLog.log("Total rewards of " + nsim + " simulations " + tr);
        ActionNode<S, A> ret = (ActionNode<S, A>) current.getMaxChild();

        A act = ret.getAction();
        return act;
    }

    /**
     * This method walks down the tree making decisions of the best nodes as it
     * goes. When it reaches an unexplored leaf node it plays a random game to
     * initialize that nodes value.
     *
     * @param node      current state node being traversed in tree.
     * @param simulator contains current state of game being played.
     * @return rewards of simulated game are passed up the tree.
     */
    private double[] playSimulation(
            Node<S, A> node, Simulator<S, A> simulator) {

        if (simulator.getTime() > horizon) {
            System.err.println("this should never happen");
            System.exit(1);
        }

        double[] rewards = null;
        // Nodes visited on the way down; their statistics are updated afterwards.
        List<Node<S, A>> path = new ArrayList<Node<S, A>>();

        while (true) {
            path.add(node);
            // Stop descending at a terminal state or an unexplored state node,
            // then initialize its value with a rollout.
            if ((node instanceof StateNode)
                    && (simulator.isTerminalState() || node.getVisits() == 0)) {
                rewards = simulateGame(simulator);
                break;
            } else {
                node = node.selectChild(simulator, _rand, horizon - simulator.getTime() - 1);
            }
        }

        if (path.size() == 0) {
            throw new RuntimeException("Path size zero");
        }

        // Back up the rollout reward through every node on the path.
        for (Node<S, A> n : path) {
            n.update(rewards);
        }
        return rewards;
    }

    /**
     * Quickly simulate a game from the current state and return accumulated
     * reward.
     *
     * <p>Plays at most {@code fH} rollout steps (the full remaining game while
     * the SPUDD value function is still being computed, {@code fHorizon}
     * otherwise). At the cutoff depth it tries to replace the tail of the game
     * with a SPUDD value query; if the query fails it keeps playing randomly.
     *
     * @param simulator a copy of the simulator you want to use to simulate game.
     * @return accumulated reward vector from the game.
     */
    private double[] simulateGame(Simulator<S, A> simulator) {

        if (simulator.getTime() > horizon) {
            System.err.println("this should never happen");
            System.exit(1);
        }

        int startdepth = simulator.getTime();

        int depth = 0;
        int fH = 1;
        if (!SPUDDer.isFinished(inst_name, horizon - startdepth))
            fH = fHorizon;

        // Reward snapshot taken one step before the cutoff so the queried value
        // can be added on top of it.
        double[] prevreward = null;

        while (!simulator.isTerminalState()) {
            if (depth == fH - 1 || (fH == 0 && depth == 0))
                prevreward = simulator.getRewards();

            if (depth == fH) {
                double eval;
                try {
                    eval = ((Double) Query.query(simulator, true,
                            horizon - startdepth - depth - 1, inst_name));
                    prevreward[0] += eval;
                    break;
                } catch (RuntimeException e) {
                    // Query unavailable: fall through and keep playing randomly.
                }
            }

            switch (simulationMethod_) {
                case RANDOM:
                    List<A> actions = simulator.getLegalActions();
                    // nextInt is the idiomatic uniform pick (was nextDouble()*size cast).
                    A randomAction = actions.get(_rand.nextInt(actions.size()));
                    simulator.takeAction(randomAction);
                    break;
                case NOOP:
                    IPCAction noop = new IPCAction();
                    simulator.takeAction((A) noop);
                    break; // was missing; benign only because NOOP is the last case
            }

            ++depth;
        }
        return simulator.getRewards();
    }

}
