/**
 * 
 * Copyright (C) 2011 Cody Stoutenburg . All rights reserved.
 *
 *       This program is free software; you can redistribute it and/or
 *       modify it under the terms of the GNU Lesser General Public License
 *       as published by the Free Software Foundation; either version 2.1
 *       of the License, or (at your option) any later version.
 *
 *       This program is distributed in the hope that it will be useful,
 *       but WITHOUT ANY WARRANTY; without even the implied warranty of
 *       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *       GNU Lesser General Public License for more details.
 *
 *       You should have received a copy of the GNU Lesser General Public License
 *       along with this program; if not, write to the Free Software
 *       Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA. 
 * 
 */
package ca.usherbrooke.thread;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import ca.usherbrooke.agent.SimpleAutonomousAgent;
import ca.usherbrooke.agent.action.Action;
import ca.usherbrooke.goal.BasicGoal;
import ca.usherbrooke.model.equipment.EquipmentModel;
import ca.usherbrooke.model.world.IWorldModel;
import ca.usherbrooke.tools.position.Vector2D;

/**
 * @author Cody Stoutenburg
 * 
 */
public class TaskGenerateAction extends Task<Action> {
	/** Shared RNG used for the weighted random action selection. */
	private static Random rand = new Random();

	/** Weight given to the inverted relative cost when scoring an action. */
	private static final float COST_WEIGHT = 0.3f;
	/** Weight given to the relative expected gain when scoring an action. */
	private static final float WON_WEIGHT = 0.7f;
	/** Time budget (ms) handed to the action generator. */
	private static final long GENERATION_TIMEOUT_MS = 10000L;

	/** Task result; stays null until doTask() has produced an action. */
	private Action generatedAction;

	private final SimpleAutonomousAgent agent;
	/** GUIDs of equipment observed while pursuing the current goal (deduplicated). */
	private final List<Long> infoObjectMemory;

	/**
	 * Creates a task that generates the next {@link Action} for the given agent.
	 *
	 * @param a the agent to generate an action for
	 */
	public TaskGenerateAction(SimpleAutonomousAgent a) {
		agent = a;
		generatedAction = null;
		infoObjectMemory = new ArrayList<Long>();
	}

	@Override
	protected void doTask() {
		// Keep generating until one attempt actually yields an action.
		while (generatedAction == null) {
			generatedAction = generateAction();
		}
	}

	/**
	 * @return the action produced by {@link #doTask()}, or null if the task
	 *         has not run (or not completed) yet
	 */
	@Override
	public Action getResult() {
		return generatedAction;
	}

	/**
	 * Scans the current goal's values for equipment GUIDs that are visible in
	 * the agent's known world and memorizes them.
	 */
	private void refreshLocalMemory() {
		BasicGoal current = agent.getSelectedGoal();
		if (current == null) {
			return;
		}
		IWorldModel world = agent.getKnownWorld();
		for (Float val : current.getValues()) {
			Long guid = Long.valueOf(val.longValue());
			// Memorize each GUID only once: this method runs on every
			// generation pass, so without the contains() check the memory
			// would grow with duplicates on each call, and every duplicate
			// would later inject the same equipment position twice into the
			// goal values.
			if (world.getEquipment(guid) != null
					&& !infoObjectMemory.contains(guid)) {
				infoObjectMemory.add(guid);
			}
		}
	}

	/**
	 * Builds the value list for the current goal (goal values plus positions
	 * of unheld, still-visible equipment from local memory) and generates one
	 * action from it.
	 *
	 * @return the selected action, or null when generation produced nothing
	 */
	private Action generateAction() {
		// First try to (re)discover equipment referenced by the goal.
		refreshLocalMemory();

		BasicGoal currentGoal = agent.getSelectedGoal();
		// Defensive copy: the positions appended below must not leak back
		// into the goal's own value list. Also guards against a goal that
		// was deselected between refreshLocalMemory() and here.
		List<Float> values = (currentGoal != null)
				? new ArrayList<Float>(currentGoal.getValues())
				: new ArrayList<Float>();

		// Enrich the goal values with the positions of remembered equipment.
		IWorldModel world = agent.getKnownWorld();
		for (Long guidObject : infoObjectMemory) {
			EquipmentModel equipment = world.getEquipment(guidObject);
			// The equipment may no longer be visible in the known world, or
			// may have been picked up by someone in the meantime.
			if (equipment != null && equipment.getHolder() == null) {
				Vector2D pos = equipment.getCenterPosition();
				values.add(pos.getX());
				values.add(pos.getY());
			}
		}

		long before = System.currentTimeMillis();
		Action currentAction = generateNewAction(values);
		long total = System.currentTimeMillis() - before;
		System.out.println("select an action take: " + total + "ms");

		return currentAction;
	}

	/**
	 * Generates all candidate actions for the given values and picks one.
	 *
	 * @param values goal values (possibly enriched with equipment positions)
	 * @return the chosen action, or null when no candidate was generated
	 */
	private Action generateNewAction(List<Float> values) {
		List<Action> allAction = Action
				.generateAllActionWithExplorationIfNoOther(agent, values,
						GENERATION_TIMEOUT_MS);

		if (allAction.isEmpty()) {
			System.out.println("no action generated");
			return null;
		}
		if (allAction.size() == 1) {
			return allAction.get(0);
		}
		return pickWeightedAction(allAction);
	}

	/**
	 * Selects one action at random, weighted by a blend of inverted relative
	 * cost (cheaper actions score higher, weight {@link #COST_WEIGHT}) and
	 * relative expected gain (weight {@link #WON_WEIGHT}).
	 *
	 * @param allAction candidate actions; must contain at least two entries
	 * @return the selected action, never null
	 */
	private Action pickWeightedAction(List<Action> allAction) {
		int n = allAction.size();
		// Primitive arrays: the boxed Float arithmetic of the previous
		// version autoboxed on every accumulation.
		float[] estimatedCost = new float[n];
		float[] estimatedWon = new float[n];
		float totalCost = 0f;
		float totalWon = 0f;

		// 1) estimated cost and expected gain of each action
		for (int i = 0; i < n; ++i) {
			estimatedCost[i] = allAction.get(i).generateEstimatedCost();
			estimatedWon[i] = allAction.get(i).generateEstimatedWon();
			totalCost += estimatedCost[i];
			totalWon += estimatedWon[i];
		}

		// 2) invert the costs so cheaper actions get the larger share
		float[] costReverse = new float[n];
		float totalCostReverse = 0f;
		for (int i = 0; i < n; ++i) {
			costReverse[i] = totalCost - estimatedCost[i];
			totalCostReverse += costReverse[i];
		}

		// 3) roulette-wheel selection over the blended probabilities
		float selected = rand.nextFloat();
		float cumulative = 0f;
		for (int i = 0; i < n; ++i) {
			// Guard against division by zero (all costs zero, or all gains
			// zero): fall back to a uniform share for that component instead
			// of producing NaN probabilities, which would select nothing and
			// make doTask() loop forever.
			float probCost = (totalCostReverse > 0f)
					? costReverse[i] / totalCostReverse
					: 1f / n;
			float probWon = (totalWon > 0f)
					? estimatedWon[i] / totalWon
					: 1f / n;
			cumulative += probCost * COST_WEIGHT + probWon * WON_WEIGHT;

			if (selected <= cumulative) {
				return allAction.get(i);
			}
		}
		// Floating-point rounding can leave the cumulative sum fractionally
		// below 1; return the last action rather than null, which would
		// trigger a full (and expensive) regeneration pass in doTask().
		return allAction.get(n - 1);
	}
}
