package com.teejdeej.simpleneuralgame;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.neuroph.core.NeuralNetwork;
import org.neuroph.core.learning.IterativeLearning;
import org.neuroph.core.learning.SupervisedTrainingElement;
import org.neuroph.core.learning.TrainingSet;
import org.neuroph.nnet.MultiLayerPerceptron;

import android.util.Log;

public class NeuralNetAI implements Player {
	/** Number of neurons in the single hidden layer of the perceptron. */
	public static final int HiddenNeurons = 16;
	/** Maximum training iterations (epochs) applied per call to learn(). */
	public static final int NumberOfEpochs = 5;
	/** Backpropagation learning rate for the network's learning rule. */
	public static final double LearningRate = 0.2;
	/** Minimum raw output activation required before an action is taken. */
	public static final double ActionThreshold = 0.5;
	
	/** Tag for android.util.Log output from this class. */
	private static final String LogTag = "NeuralNetAI";
	
	/** Output neurons [0, 4) encode the four movement directions. */
	private static final int MoveOutputs = 4;
	/** Output neurons [4, 12) encode the eight firing directions. */
	private static final int FireOutputs = 8;
	/** Total size of the output layer: movement plus firing neurons. */
	private static final int TotalOutputs = MoveOutputs + FireOutputs;
	
	private NeuralNetwork m_network;
	
	/**
	 * Builds a randomly-initialised multi-layer perceptron whose input layer
	 * is sized from the game's sight-line encoding, and configures its
	 * iterative learning rule with this class's epoch count and learning rate.
	 */
	NeuralNetAI()
	{
		m_network = new MultiLayerPerceptron(
				(MainGame.BoardPossibilities - 1) * MainGame.NumberOfDirections,
				HiddenNeurons, TotalOutputs);
		m_network.randomizeWeights();
		IterativeLearning t_learningRule = (IterativeLearning) m_network.getLearningRule();
		t_learningRule.setMaxIterations(NumberOfEpochs);
		t_learningRule.setLearningRate(LearningRate);
	}
	
	/**
	 * Trains the network on a history of (board, position) -> move pairs.
	 * Each move is encoded as a 12-element target vector: index
	 * m_moveDirection is set to 1.0 when the move flag is on, and index
	 * MoveOutputs + m_fireDirection is set when the fire flag is on.
	 *
	 * @param boards    board states; parallel to positions and moves
	 * @param positions player positions; parallel to boards and moves
	 * @param moves     the move taken in each corresponding state
	 */
	@Override
	public void learn(List<int[][]> boards, List<int[]> positions, List<Move> moves)
	{
		TrainingSet<SupervisedTrainingElement> t_trainingSet =
				new TrainingSet<SupervisedTrainingElement>();
		
		for(int a = 0; a < boards.size(); a++)
		{
			SightLines t_sightLines = SightLines.generateSightLines(boards.get(a), positions.get(a));
			double[] t_input = t_sightLines.getNeuralNetInput();
			// Java arrays are zero-initialised, so only active outputs need setting.
			double[] t_output = new double[TotalOutputs];
			
			Move t_move = moves.get(a);
			if(t_move.m_move)
			{
				t_output[t_move.m_moveDirection] = 1.0;
			}
			
			if(t_move.m_fire)
			{
				t_output[MoveOutputs + t_move.m_fireDirection] = 1.0;
			}
			
			t_trainingSet.addElement(new SupervisedTrainingElement(t_input, t_output));
		}
		
		m_network.learn(t_trainingSet);
	}
	
	/**
	 * Runs the network forward on the current board state and decodes the
	 * strongest movement and firing activations into a Move. An action is
	 * only taken when its best activation exceeds ActionThreshold; otherwise
	 * the corresponding flag on the returned Move stays false.
	 *
	 * @param board    current board state
	 * @param position current player position
	 * @return the decoded move (possibly a no-op)
	 */
	@Override
	public Move move(int[][] board, int[] position)
	{
		SightLines t_sightLines = SightLines.generateSightLines(board, position);
		double[] t_input = t_sightLines.getNeuralNetInput();
		Move t_output = new Move();
		
		m_network.setInput(t_input);
		m_network.calculate();
		double[] t_rawOutput = m_network.getOutput();
		// Use the imported Android logger; the previously-referenced
		// org.newdawn.slick.util.Log is a desktop (Slick2D) class that is
		// not available on Android.
		Log.d(LogTag, "input: " + Arrays.toString(t_input));
		Log.d(LogTag, "output: " + Arrays.toString(t_rawOutput));
		
		int t_greatestMove = greatestIndex(t_rawOutput, 0, MoveOutputs);
		int t_greatestShot = greatestIndex(t_rawOutput, MoveOutputs, TotalOutputs);
		
		if(t_rawOutput[t_greatestMove] > ActionThreshold)
		{
			t_output.m_move = true;
			t_output.m_moveDirection = t_greatestMove;
		}
		
		if(t_rawOutput[t_greatestShot] > ActionThreshold)
		{
			t_output.m_fire = true;
			t_output.m_fireDirection = t_greatestShot - MoveOutputs;
		}
		
		return t_output;
	}
	
	/** Returns the index of the largest value in values within [from, to). */
	private static int greatestIndex(double[] values, int from, int to)
	{
		int t_best = from;
		for(int a = from + 1; a < to; a++)
		{
			if(values[a] > values[t_best])
			{
				t_best = a;
			}
		}
		return t_best;
	}
}
