package br.usp.pcs.lti.planning;

import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Random;

import rddl.EvalException;
import rddl.RDDL;
import rddl.RDDL.LCONST;
import rddl.RDDL.LVAR;
import rddl.RDDL.PVAR_INST_DEF;
import rddl.RDDL.PVAR_NAME;
import rddl.RDDL.TYPE_NAME;
import rddl.State;
import rddl.parser.parser;
import rddl.policy.Policy;
import rddl.sim.Simulator;
import rddl.viz.StateViz;

/**
 * Simulator of a RDDL problem. This is prepared for multi-attribute problems, as it returns rewards
 * separately for each attribute.
 * 
 * @author mlk
 * 
 */
public class MASimulator extends Simulator {

    private static final String RDDL_FILE = "resources/traffic_noInt2.rddl";
    private static final String INSTANCE_NAME = "vehicles7cross1";

    // Non-fluent "VALUE": per-vehicle reward weight, overwritten by setRewardFunction.
    private static final PVAR_NAME PVAR_VALUE = new PVAR_NAME("VALUE");
    // State fluent "at": presumably a (vehicle, location) occupancy atom — TODO confirm against the RDDL domain.
    private static final PVAR_NAME PVAR_AT = new PVAR_NAME("at");

    public MASimulator(RDDL rddl, String instance_name) throws Exception {
        super(rddl, instance_name);
    }

    /**
     * Overwrites the "VALUE" non-fluents of the instance with the given reward weights, in the order
     * in which those non-fluents appear in {@code _n._alNonFluents}.
     *
     * @param weightR one weight per "VALUE" non-fluent; extra entries are ignored
     * @throws IllegalArgumentException if there are fewer weights than "VALUE" non-fluents
     */
    public void setRewardFunction(double[] weightR) {
        int i = 0;
        for (PVAR_INST_DEF nonFluent : this._n._alNonFluents) {
            if (nonFluent._sPredName.equals(PVAR_VALUE)) {
                if (i >= weightR.length) {
                    // Fail with an explicit message instead of an ArrayIndexOutOfBoundsException.
                    throw new IllegalArgumentException("Expected at least " + (i + 1)
                            + " reward weights, but got only " + weightR.length);
                }
                nonFluent._oValue = weightR[i++];
            }
        }
    }

    /**
     * Runs the policy for the instance horizon and returns the discounted total reward, the per-step
     * rewards, and the discounted accumulated reward of each vehicle.
     *
     * @param p policy queried for an action at every step
     * @param v visualization callback, displayed at every step and closed at the end
     * @param rand_seed seed for the transition sampling, for repeatability
     * @return aggregate and per-vehicle results of the simulated round
     * @throws EvalException if RDDL evaluation fails (constraint check, sampling, ...)
     */
    @Override
    public MAResult run(Policy p, StateViz v, long rand_seed) throws EvalException {

        // Signal start of new session-independent round
        p.roundInit(Double.MAX_VALUE, this._i._nHorizon, 1, 1);

        // Reset to initial state
        this.resetState();

        // Set random seed for repeatability
        this._rand = new Random(rand_seed);

        // All objects of type "vehicle" declared in the instance.
        TYPE_NAME vehicle_type = new TYPE_NAME("vehicle");
        ArrayList<LCONST> vehicles = this._state._hmObject2Consts.get(vehicle_type);

        double accum_reward = 0.0d;
        double cur_discount = 1.0d;
        ArrayList<Double> rewards = new ArrayList<Double>(this._i._nHorizon);
        HashMap<LCONST, Double> accumRewardPerVehicle = new HashMap<LCONST, Double>(vehicles.size());
        for (LCONST vehicle : vehicles) {
            accumRewardPerVehicle.put(vehicle, 0.0);
        }

        // Run problem for specified horizon
        for (int t = 0; t < this._i._nHorizon; t++) {

            // Display state/observations that the agent sees
            v.display(this._state, t);

            // Get action from policy
            // (if POMDP and first state, no observations available yet so a null is passed)
            State state_info = ((this._state._alObservNames.size() > 0) && t == 0) ? null : this._state;
            ArrayList<PVAR_INST_DEF> action_list = p.getActions(state_info);

            // Check state-action constraints
            this._state.checkStateActionConstraints(action_list);

            // Compute next state (and all intermediate / observation variables)
            this._state.computeNextState(action_list, this._rand);

            // Calculate reward / objective and store
            double reward = ((Number) this._state._reward.sample(new HashMap<LVAR, LCONST>(), this._state, this._rand))
                    .doubleValue();
            rewards.add(reward);
            accum_reward += cur_discount * reward;

            // Accumulate this step's per-vehicle rewards with the SAME discount factor used for the
            // global reward above. (Bug fix: the discount used to be advanced before this loop, so
            // per-vehicle rewards were discounted one step more than accum_reward.)
            HashMap<LCONST, Double> rewardPerVehicle = this.getRewardPerVehicle(vehicles);
            for (LCONST vehicle : vehicles) {
                double accumReward = accumRewardPerVehicle.get(vehicle);
                accumRewardPerVehicle.put(vehicle, accumReward + cur_discount * rewardPerVehicle.get(vehicle));
            }

            // Advance the discount only once the whole step (global + per-vehicle) is accumulated.
            cur_discount *= this._i._dDiscount;

            // Done with this iteration, advance to next round
            this._state.advanceNextState(false /* do not clear observations */);
        }

        // Signal end of the round
        p.roundEnd(accum_reward);

        // Problem over, close the visualization
        v.close();

        // Flatten the per-vehicle map into an array following the iteration order of "vehicles".
        double[] accumRewardPerVehicleList = new double[vehicles.size()];
        int i = 0;
        for (LCONST vehicle : vehicles) {
            accumRewardPerVehicleList[i++] = accumRewardPerVehicle.get(vehicle);
        }
        return new MAResult(accum_reward, rewards, accumRewardPerVehicleList);
    }

    /**
     * Computes the reward each vehicle receives in the current state: for every true "at" atom, the
     * vehicle (the atom's first parameter) earns its "VALUE" weight.
     *
     * @param vehicles all vehicles of the instance; each gets an entry (0.0 if it earns nothing)
     * @return reward of the current state, per vehicle
     * @throws EvalException if atom generation or variable lookup fails
     */
    private HashMap<LCONST, Double> getRewardPerVehicle(ArrayList<LCONST> vehicles) throws EvalException {
        HashMap<LCONST, Double> rewardPerVehicle = new HashMap<LCONST, Double>(vehicles.size());
        for (LCONST vehicle : vehicles) {
            rewardPerVehicle.put(vehicle, 0.0);
        }
        ArrayList<ArrayList<LCONST>> pares = this._state.generateAtoms(PVAR_AT);
        for (ArrayList<LCONST> par : pares) {
            // Boolean.TRUE.equals also guards against an unassigned (null) atom, avoiding an NPE on unboxing.
            Boolean active = (Boolean) this._state.getPVariableAssign(PVAR_AT, par);
            if (Boolean.TRUE.equals(active)) {
                ArrayList<LCONST> paramV = new ArrayList<LCONST>(1);
                paramV.add(par.get(0)); // first parameter of "at" is the vehicle
                Double valor = (Double) this._state.getPVariableAssign(PVAR_VALUE, paramV);
                rewardPerVehicle.put(par.get(0), rewardPerVehicle.get(par.get(0)) + valor);
            }
        }
        return rewardPerVehicle;
    }

    /**
     * Parses the hard-coded RDDL file, installs the reward weights, and simulates one round with a
     * random policy, printing the multi-attribute result.
     */
    public static void main(String[] args) throws Exception {

        File f = new File(RDDL_FILE);
        RDDL rddl = new RDDL(parser.parse(f));

        MASimulator sim = new MASimulator(rddl, INSTANCE_NAME);

        // Reward weights: one per VALUE non-fluent of the instance.
        double[] pesos = { -1.0, -10.0 };
        sim.setRewardFunction(pesos);

        StateViz viz = new TrafficDisplay();

        Policy policy = new RandomPolicy(INSTANCE_NAME);
        MAResult result = sim.run(policy, viz, new Random().nextLong());
        System.out.println("==> " + result);

        viz.close();
    }
}
