package ch.idsia.ai.agents.controllers.myai;

import java.util.Vector;

/**
 * A hand-built decision tree for a Mario-playing agent. The root has three
 * threat-category children (enemy / obstacle / no element); each category's
 * children are concrete actions (run, jump, shoot, ...). {@link #decide}
 * walks the tree using learned Q-value ratios, mixing exploitation of the
 * best-known action with epsilon-style random exploration.
 *
 * Author: blakekl10, Jun 15, 2010.
 */
public class DecisionTree
{
    private Node root;
    // Column of the Q-value table selected by the current environment
    // (0 = nothing, 1 = obstacle, 2..5 = enemy bucketed by height).
    private int xCell;
    // NOTE(review): yCell is written but never read anywhere in this class;
    // kept only in case external code accesses it — candidate for removal.
    private int yCell;

    // Indices of the three threat-category children directly under the root.
    private static final int ENEMY = 0;
    private static final int OBSTACLE = 1;
    private static final int NO_ELEMENT = 2;
    // Fully-qualified class names reported by Element.getName().
    private static final String ENEMY_CLASS = "ch.idsia.ai.agents.controllers.myai.Enemy";
    private static final String OBSTACLE_CLASS = "ch.idsia.ai.agents.controllers.myai.Obstacle";
    // Held as a field so changeAbleToShoot() can detach/re-attach the SAME node.
    private Node doShoot;

    /**
     * Builds the fixed tree: root -> {TackleEnemy, AvoidObstacle, ProceedToExit},
     * each with its concrete action leaves. All 6 action options are included.
     */
    public DecisionTree()
    {
        root = new Node("ROOT");
        doShoot = new Node("doShoot", 5);
        root.addNode(new Node("TackleEnemy"));
        root.addNode(new Node("AvoidObstacle"));
        root.addNode(new Node("ProceedToExit"));
        Node current = root.getNode(ENEMY);
        current.addNode(new Node("doRun", 5));
        current.addNode(new Node("doJump", 5));
        current.addNode(doShoot);
        current = root.getNode(OBSTACLE);
        current.addNode(new Node("doJump", 0));
        current = root.getNode(NO_ELEMENT);
        current.addNode(new Node("doRun", 2));
        current.addNode(new Node("doRight", 1));
    }

    /**
     * Picks an action leaf for the current situation.
     *
     * @param qValue      learned Q-value ratios indexed [situation][action]
     * @param rate        exploration rate in [0,1]; with this probability a
     *                    random child is chosen instead of the best-scoring one
     * @param environment elements in view; the first element decides the branch
     * @return the chosen action node, or {@code null} if the first element is
     *         neither an Enemy nor an Obstacle (error condition)
     */
    public Node decide(Ratio[][] qValue, double rate, Vector environment)
    {
        xCell = 0;
        yCell = 0;
        // Traverse tree to leaf node using q-value data and learning rate.
        Node current = root;

        // No enemies or obstacles in view.
        if(environment.size() == 0)
        {
            current = root.getNode(NO_ELEMENT);
        }

        else if (((Element)(environment.get(0))).getName().equals(OBSTACLE_CLASS))
        {
            current = root.getNode(OBSTACLE);
            xCell = 1;
        }

        else if (((Element)(environment.get(0))).getName().equals(ENEMY_CLASS))
        {
            current = root.getNode(ENEMY);
            // Bucket enemies by relative height into Q-table rows 2..5.
            int height = ((Element)environment.get(0)).getHeight();
            if(height < 0)
                xCell = 2;
            else if(height < 1)
                xCell = 3;
            else if(height < 5)
                xCell = 4;
            else
                xCell = 5;
        }
        else
        {
            System.out.println("error condition reached.");
            System.out.println(environment.size());
            for(int i = 0; i < environment.size(); ++i)
                // BUGFIX: was environment.get(0) — printed the first element
                // repeatedly instead of dumping each element in the list.
                System.out.println(((Element)(environment.get(i))).getName());
            return null;
        }

        int max = 0;          // index of the best-scoring child so far
        double maxPoint = 0.0; // best score so far
        int nextVal = 0;       // column into qValue[xCell] for the next child

        // Find the best action according to current knowledge. Columns with a
        // negative ratio are disabled (e.g. shooting unavailable) and skipped.
        // NOTE(review): if every remaining column is negative this while-loop
        // walks past the end of qValue[xCell] — confirm the table always has
        // a non-negative column per enabled child.
        for(int i = 0; i < current.getNumberOfChildren(); ++i)
        {
            while(qValue[xCell][nextVal].getRatio() < 0)
            {
                nextVal++;
            }
            // Hoisted: original recomputed this product four times per child.
            double score = qValue[xCell][nextVal].getRatio()
                    * current.getNode(i).getPointValue();
            // >= keeps the original tie-breaking (later child wins ties).
            if(score >= maxPoint)
            {
                max = i;
                maxPoint = score;
            }
            nextVal++;
        }

        // Random draw compared to the learning rate decides explore/exploit.
        double random = Math.random();

        // Node reference that will point to the final decision node.
        Node action = null;

        // Explore: pick a uniformly random child.
        if(random < rate)
        {
            // BUGFIX: Math.round(Math.random()*n) gave the first and last
            // children half the probability of interior ones; truncation of
            // random()*count is uniform over [0, count-1].
            int randomNode = (int)(Math.random() * current.getNumberOfChildren());
            action = current.getNode(randomNode);
        }

        // Exploit current knowledge.
        else
        {
            action = current.getNode(max);
        }

        return action;
    }

    /**
     * Attaches or detaches the shared doShoot leaf under TackleEnemy so the
     * shoot action is only offered while Mario is able to shoot.
     *
     * @param value true if shooting is currently possible
     */
    public void changeAbleToShoot(boolean value)
    {
        if (!value && root.getNode(ENEMY).getNumberOfChildren() > 2)
        {
            Node current = root.getNode(ENEMY);
            current.removeNode(2);
        }
        if (value && root.getNode(ENEMY).getNumberOfChildren() < 3)
        {
            Node current = root.getNode(ENEMY);
            current.addNode(doShoot);
        }
    }
}
