/*
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package net.javlov.example.grid;

import java.util.ArrayList;
import java.util.List;

import javax.swing.Timer;

import net.javlov.Agent;
import net.javlov.DecayingLearningRate;
import net.javlov.EpisodicRewardStepStatistic;
import net.javlov.FixedLearningRate;
import net.javlov.Option;
import net.javlov.Policy;
import net.javlov.QLearningAgent;
import net.javlov.SarsaAgent;
import net.javlov.Simulator;
import net.javlov.example.ExperimentGUI;
import net.javlov.policy.EGreedyPolicy;
import net.javlov.vf.BasicOptionTrace;
import net.javlov.vf.OptionTraces;
import net.javlov.vf.QMap;
import net.javlov.vf.TableOptionValues;
import net.javlov.vf.TracedQTable;

import org.apache.commons.math3.stat.descriptive.SummaryStatistics;

/**
 * Experiment driver for a simple grid-world reinforcement-learning task.
 *
 * <p>Wires together a {@link SimpleGridWorld}, a Q-learning agent with an
 * e-greedy policy, and a {@link Simulator}, then runs {@code runs} independent
 * runs of {@code episodes} episodes each. When {@link #gui} is {@code true},
 * the experiment is shown in a Swing window and run on a background thread
 * (hence {@code Runnable}); otherwise it runs headless on the calling thread.
 */
public class GridMain implements Runnable {
	
	protected Simulator sim;
	protected QMap<double[]> vt;
	protected SimpleGridWorld world;
	/** Show a Swing visualization of the grid world while running. */
	public boolean gui = false;
	
	/** Number of episodes per run, and number of independent runs. */
	public int episodes = 300, runs = 100;
	
	//learning params: discount factor, trace decay, exploration rate
	public double gamma = 1, lambda = 0.9, epsilon = 0.1;
	
	public static void main(String[] args) {
		GridMain m = new GridMain();
		m.init();
		m.start();
	}
	
	/**
	 * Builds the world, the agent, and the simulator glue that connects them.
	 * Must be called before {@link #start()}.
	 */
	public void init() {
		
		makeWorld();
		
		Agent a = makeAgent( makeActions() );
		
		//simulator / glue
		sim = new Simulator();
		sim.setAgent(a);
		sim.setEnvironment(world);		
	}
	
	/** Creates the 50x50 grid world with its goal cell at (8,8). */
	public void makeWorld() {
		//world
		world = new SimpleGridWorld(50,50,false);
		world.setGoal(8,8);
	}
	
	/**
	 * Builds the primitive action set: one {@link GridMove} per compass direction.
	 *
	 * @return the four-element option pool (north, east, south, west)
	 */
	public List<Option<? super double[]>> makeActions() {
		//actions
		List<Option<? super double[]>> optionPool = new ArrayList<Option<? super double[]>>(4);
		optionPool.add( new GridMove(Direction.north, world) );
		optionPool.add( new GridMove(Direction.east, world) );
		optionPool.add( new GridMove(Direction.south, world) );
		optionPool.add( new GridMove(Direction.west, world) );
		return optionPool;
	}
	
	/**
	 * Constructs the learning agent: a tabular Q-learner with an e-greedy
	 * policy and a decaying learning rate. Also initializes the shared value
	 * function {@link #vt}.
	 *
	 * @param optionPool the available actions/options for the agent
	 * @return the configured agent
	 */
	public Agent makeAgent(List<Option<? super double[]>> optionPool) {				
		//value function
		vt = new QMap<double[]>(optionPool, 150); //for TD(0)

		// Eligibility-trace wrapper around the value table. Currently unused by
		// the agent below (plain TD(0)); pass tqt instead of vt to enable traces.
		OptionTraces trs = new OptionTraces(new BasicOptionTrace(gamma, lambda,1,false), 0.001);
		TracedQTable<double[]> tqt = new TracedQTable<double[]>( vt, trs);
		
		//policy
		Policy<double[]> pi = new EGreedyPolicy<double[]>(vt, epsilon, optionPool);
		
		//agent
		SarsaAgent<double[]> agent = new QLearningAgent<double[]>(vt, gamma); //use vt instead of tqt for no el. traces
		agent.setPolicy(pi);
		agent.setLearnRate( new DecayingLearningRate(1, optionPool.size(), 0.3) );
		//agent.setLearnRate( new FixedLearningRate(0.4) );
		return agent;
	}
	
	/**
	 * Starts the experiment. With {@link #gui} enabled the simulator starts
	 * suspended, a repaint timer drives the view at ~24 fps, and the episode
	 * loop runs on a background thread; otherwise {@link #run()} executes
	 * synchronously.
	 */
	public void start() {
		if ( gui ) {
			GridWorldView wv = new GridWorldView(world,40);
			Timer timer = new Timer(1000/24, wv);
			// Constructed for its side effect: displays the experiment window.
			new ExperimentGUI("Simple GridWorld", wv, sim);
			sim.suspend();
			timer.start();
			new Thread(this).start();
		} else
			run();
	}
	
	/**
	 * Runs {@code runs} x {@code episodes} episodes, accumulating per-episode
	 * step counts across runs and printing the total wall-clock time (ms)
	 * spent inside {@code runEpisode()}.
	 */
	public void run() {
		EpisodicRewardStepStatistic stat = new EpisodicRewardStepStatistic(episodes);
		SummaryStatistics[] sstat = new SummaryStatistics[episodes];
		for ( int i = 0; i < episodes; i++ ) {
			sstat[i] = new SummaryStatistics();
		}
		sim.addStatistic(stat);
		
		long start = 0, total = 0;
		for ( int r = 0; r < runs; r++ ) {
			sim.init(); //fresh agent/world state for each independent run
			for ( int i = 0; i < episodes; i++ ) {
				start = System.currentTimeMillis();
				sim.runEpisode();
				total += System.currentTimeMillis() - start;
				sim.reset();
				sstat[i].addValue( stat.getSteps()[i] );
				//sstat[i].addValue( stat.getRewards()[i] ); //alternative: track reward instead of steps
			}
		}
		//uncomment to print the mean steps per episode across runs:
		//for ( int i = 0; i < episodes; i++ ) {
		//	System.out.println( sstat[i].getMean() );
		//}
		System.out.println(total);
	}

}
