/*
 *  DONE live dead state, simplifies analysis
 *  DONE FIX get sensors added to state, but differentiated
 *  DONE version control
 *  DONE import colt library documentation
 *  TODO make clustering algorithm mathematically sound, right now calculations are mathematically incoherent - they just work without any rigorous tie into theory 
 *  DONE cluster metric
 *  - DONE clustering algorithm
 *  - DONE history query
 *    - DONE select cols by regexp - return col ref list
 *    - DONE select rows by criteria - return row ref list
 *    - DONE select history subset by col and row ref lists
 *  - DONE history clusterer
 *    - DONE convert history subset into matrix for clustering algorithm
 *    - DONE cluster sensor and state histories, divide # clusters in former by latter
 *  TODO AI agents
 *  - TODO policy (generated by GA) [global search - best transition function for landscape]
 *    - TODO independent of brain type, i.e. could be weighted rule based, nearest cluster, ANN, etc.  brain is initialized with genotype
 *    - TODO must implement Wolfram's type 4 machine, so it can carry out universal computation 
 *  - DONE semantic, uses the kill and death sensors to pick a good immediate move [sub type of best first]
 *    - DONE use the existing closestAgent sensors, instead of the new convoluted ones
 *    - DONE priority is avoiding bullets
 *    - DONE then shoot closest enemy
 *  - DONE best first [local search - best neighborhood transition function]
 *    - DONE puppet model state where each agent's action can be fixed
 *      - DONE each puppet agent inherits properties of real agents
 *      - DONE model determines state from list of agent moves, as long as # of agents
 *        or
 *      - TODO puppet state is exactly the same as real state, i.e. deep copy via serialization
 *      - TODO do 30 runs per agent choice for good stat, return stat 
 *    - DONE enumerate all possibilities with accompanying measure
 *    - DONE pick move with highest measure statistic (i.e. highest average)
 *  
 *  TODO make a conversion utility for converting between lists and arrays with boxing
 *  TODO some nice way to handle the initialization of variables that are state variables, but need to be maximized or minimized by some function, so they can't just be set to an infinite value 
 *  TODO redo all lists with matrices and vectors 
 *  TODO Instead of using hashmap, use reflection to access agent state
 *  TODO See if agents can be fit into lisp formal analysis schema
 *  TODO Make sure is loosely coupled enough that can easily incorporate other frameworks, i.e. MASON for simulation & visualization, perhaps JNS to model networks, etc.
 *  DONE With actuators, differentiate between parameters and the fire variable
 *  DONE Make a map that retrieves sets of values based on string regexp match
 *  TODO find a more Java way to do Box, like a Box<T>
 *  - TODO add another typing layer, i.e. signifying whether data is symbolic or numeric (i.e. what kind of distance function can be used for comparison)
 */


package net.yters.controller;

import java.io.*;
import java.util.ArrayList;
import java.util.List;

import net.bithaven.roguelike.ui.*;
import net.yters.model.ModelState;

/**
 * Drives the simulation: renders the model through a roguelike UI, parses
 * user input into action codes, and advances the model one tick at a time
 * until the user quits. On exit, dumps the recorded simulation history.
 */
public class Controller {
	/**
	 * Entry point: constructs a controller and runs the interactive game loop.
	 *
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args) throws IOException, InterruptedException {
		Controller game = new Controller();
		game.gameloop();
	}

	// Current simulation state; reassigned each tick by nextGameState.
	ModelState f_state = new ModelState();
	// Roguelike text UI with a 10x10 view.
	UserInterface f_ui = new RogueLikeUI(10, 10);

	// Shared reader for getInput(). Wrapping System.in in a fresh
	// BufferedReader per call can silently discard already-buffered bytes,
	// so the reader is created once and reused.
	private BufferedReader f_stdin;

	/**
	 * Main loop: render the environment, read and parse input from the UI,
	 * and advance the model until the quit command (-1) is seen. Note that,
	 * matching the original behavior, the model still advances one final
	 * tick on the iteration in which the quit command arrives.
	 */
	void gameloop() throws IOException, InterruptedException {
		boolean gameover = false;

		while (!gameover) {
			f_ui.update(f_state.f_env.toString());
			int input = parseInput(f_ui.getInput());

			if (input == -1) { // quit command
				gameover = true;
			}

			f_state = nextGameState(f_state, input);
		}

		f_ui.cleanup();

		// Print history of simulation
		System.out.println(f_state.f_collector.historyToString());
	}

	/**
	 * Advances the model by one tick, feeding the parsed input to the hero
	 * agent through its "choice" state variable.
	 *
	 * @param state model to advance; mutated in place
	 * @param input encoded action choice for the hero agent
	 * @return the same (mutated) state, for fluent reassignment
	 */
	ModelState nextGameState(ModelState state, double input) {
		// This convoluted technique is used in case the hero agent is to
		// combine algorithmic and human state input.
		state.f_hero.f_state.get(state.f_hero.qualifyField("choice")).f_value = input;
		state.execute();

		return state;
	}

	/**
	 * Reads one line from standard input. Currently unused within this class
	 * (the UI supplies input via {@code f_ui.getInput()}); kept for
	 * console-driven runs.
	 *
	 * @return the next line from stdin, or null at end of stream
	 */
	String getInput() throws IOException {
		if (f_stdin == null) {
			f_stdin = new BufferedReader(new InputStreamReader(System.in));
		}
		return f_stdin.readLine();
	}

	/**
	 * Maps raw UI input tokens to an action code: 1 for "a", 2 for "d",
	 * 0 for "w", 3 for "s" (presumably WASD direction codes — confirm
	 * against how ModelState interprets "choice"), -1 for "quit"/"q",
	 * and -2 when no recognized token is present.
	 *
	 * @param input tokens reported by the UI for this tick
	 * @return the encoded action, or -2 if unrecognized
	 */
	int parseInput(List<String> input) {
		if (input.contains("a")) {
			return 1;
		}
		if (input.contains("d")) {
			return 2;
		}
		if (input.contains("w")) {
			return 0;
		}
		if (input.contains("s")) {
			return 3;
		}

		if (input.contains("quit") || input.contains("q")) {
			return -1;
		}
		return -2;
	}
}
