/*
 * Javlov - a Java toolkit for reinforcement learning with multi-agent support.
 * 
 * Copyright (c) 2009 Matthijs Snel
 * 
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.javlov.example.landmarks;

import java.awt.Point;
import java.util.ArrayList;
import java.util.List;

import net.javlov.AbstractOption;
import net.javlov.Action;
import net.javlov.LearningRate;
import net.javlov.Option;
import net.javlov.Policy;
import net.javlov.State;
import net.javlov.world.grid.Direction;

/**
 * Hand-coded option that greedily navigates the agent towards a fixed landmark
 * on a grid. The option terminates with probability 1 outside the landmark's
 * neighbourhood and at the landmark itself, never inside the neighbourhood,
 * except with a small probability (0.1) where neighbourhoods of two landmarks
 * overlap.
 */
public class ReachLandmarkOption extends AbstractOption {

	/** Manhattan radius defining each landmark's neighbourhood. */
	protected int dist;
	/** The landmark this option navigates towards. */
	protected Point endPoint;
	/** Primitive action pool; a size of 4 means diagonal moves are not allowed. */
	protected List<Action> actions;
	/** Positions of the remaining landmarks (used to detect overlap regions). */
	protected Point[] others;
	
	/**
	 * Creates an option that walks the agent straight towards {@code end}.
	 *
	 * @param name       the option's name, passed to {@link AbstractOption}
	 * @param dist       Manhattan radius of a landmark's neighbourhood
	 * @param end        the target landmark location
	 * @param actionPool primitive actions, indexed by (possibly scaled) direction ID
	 * @param others     positions of the other landmarks in the world
	 */
	public ReachLandmarkOption(String name, int dist, Point end, List<Action> actionPool, Point[] others) {
		super(name);
		this.dist = dist;
		endPoint = end;
		actions = actionPool;
		this.others = others;
	}
	
	/**
	 * Returns whether {@code (x,y)} lies within Manhattan distance {@code dist}
	 * of point {@code p}.
	 *
	 * @param p    the anchor point to measure against
	 * @param dist inclusive Manhattan-distance threshold
	 * @return true iff |x - p.x| + |y - p.y| &lt;= dist
	 */
	public boolean inRange(double x, double y, Point p, int dist) {
		// BUGFIX: the y-term previously read Math.abs(y - endPoint.y) regardless of p,
		// so the overlap checks against the *other* landmarks in getBeta were wrong.
		return Math.abs(x - p.x) + Math.abs(y - p.y) <= dist;
	}
	
	/**
	 * Termination probability of this option in state {@code s}: any explicitly
	 * cached beta wins; otherwise 1 at the landmark itself, 0 inside the
	 * landmark's neighbourhood (0.1 if the state also lies in another landmark's
	 * neighbourhood), and 1 everywhere outside.
	 */
	@Override
	public <T> double getBeta(State<T> s) {
		Double beta = betas.get(s);
		if ( beta != null )
			return beta;
		
		// state data is assumed to be [x, y] grid coordinates
		double[] data = (double[])s.getData();
		double x = data[0], y = data[1];
		if ( x == endPoint.x && y == endPoint.y )
			return 1;
		if ( inRange(x,y,endPoint,dist) ) {
			// soften termination where neighbourhoods of two landmarks overlap
			for ( Point p : others )
				if ( inRange(x,y,p,dist) )
					return 0.1;
			return 0;
		}
		return 1;
	}

	/** The option may be initiated wherever it does not terminate with certainty. */
	@Override
	public <T> boolean isEligible(State<T> s) {
		return getBeta(s) < 1;
	}
	
	/** No learning: this option's behaviour is hand-coded, so updates are ignored. */
	@Override
	public <T> void update(State<T> s, Option o, double update) {
		// intentionally a no-op: the policy is fixed
	}

	/**
	 * Performs one step: terminates stochastically according to {@link #getBeta}
	 * (returning {@code null}), otherwise takes a greedy step towards the landmark.
	 */
	@Override
	public <T> Action doStep(State<T> s, double reward) {
		if ( Math.random() < getBeta(s) ) {
			setFinished();
			return null;
		}
		
		return doStepWithoutTerminationCheck(s, reward);
	}

	/**
	 * Chooses the primitive action that moves the agent one step closer to the
	 * landmark, breaking ties between the two axes uniformly at random when
	 * diagonal moves are not available (action pool of size 4).
	 */
	protected <T> Action doStepWithoutTerminationCheck(State<T> s, double reward) {
		double[] data = (double[])s.getData();	
		int xdir = (int)Math.signum(endPoint.x - data[0]),
			ydir = (int)Math.signum(endPoint.y - data[1]);
		
		int dirid;
		if ( actions.size() == 4 ) { //diagonal move not allowed
			if ( Math.abs(xdir) == Math.abs(ydir) ) { //both |1|
				// pick one axis at random so progress alternates fairly
				if ( Math.random() < 0.5 )
					xdir = 0;
				else
					ydir = 0;
					
			}
			// with 4 actions only even direction IDs (N/E/S/W) occur, hence /2
			dirid = Direction.get(xdir, ydir).getID();
			return actions.get(dirid/2);
		}
		dirid = Direction.get(xdir, ydir).getID();
		return actions.get(dirid);
	}
	
	/** Starts an execution of the option and returns its first action. */
	@Override
	public <T> Action firstStep(State<T> s) {
		finished = false;
		return doStepWithoutTerminationCheck(s, 0);
	}

	/** The policy is hard-coded, so no {@link Policy} object is exposed. */
	@Override
	public Policy getPolicy() {
		return null;
	}

	/** No-op: the policy is hard-coded and cannot be replaced. */
	@Override
	public void setPolicy(Policy p) {
		// intentionally a no-op
	}

	/** Marks the option as finished at the end of an episode. */
	@Override
	public <T> void lastStep(State<T> s, double reward) {
		setFinished();		
	}

	/** No learning rate: the option does not learn. */
	@Override
	public LearningRate getLearnRate() {
		return null;
	}

	/** No-op: the option does not learn. */
	@Override
	public void setLearnRate(LearningRate rate) {
		// intentionally a no-op
	}
	
	/** Resets any cached termination probabilities. */
	@Override
	public void init() {
		betas.clear();
	}
}
