package nl.rug.ml.dobbelen.learning;

import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import nl.rug.ml.dobbelen.agent.DobbelenAgent;
import nl.rug.ml.dobbelen.game.Dice;
import nl.rug.ml.dobbelen.game.DobbelenCondition;
import nl.rug.ml.dobbelen.game.DobbelenGame;

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

/**
 * A reinforcement-learning agent for the dice game Dobbelen.
 *
 * Supports three training modes — one-step Q-learning, one-step SARSA, and
 * SARSA(lambda) with eligibility traces — all of which store the learned
 * Q-values directly on the {@link DobbelenAction} objects held in a
 * state-to-actions multimap.
 *
 * @author Do Bich Ngoc (bichngocdo89@gmail.com)
 */
public class DobbelenLearningAgent extends DobbelenAgent {

	// Step size (alpha) used by every TD update.
	private double learningRate = 0.15;
	// Discount factor (gamma); 1.0 means undiscounted returns.
	private double discountRate = 1.0;

	// Maps every state seen so far to the actions (with learned values)
	// available in it. Populated lazily by currentState().
	private Multimap<DobbelenState, DobbelenAction> stateActionMap;

	/**
	 * Creates an agent playing with the given number of dice.
	 *
	 * @param numDice number of dice in the game, forwarded to the superclass
	 */
	public DobbelenLearningAgent(int numDice) {
		super(numDice);
		stateActionMap = HashMultimap.create();
	}

	public double getLearningRate() {
		return learningRate;
	}

	public void setLearningRate(double learningRate) {
		this.learningRate = learningRate;
	}

	public double getDiscountRate() {
		return discountRate;
	}

	public void setDiscountRate(double discountRate) {
		this.discountRate = discountRate;
	}

	/** Discards all learned state/action values. */
	private void clearMemory() {
		stateActionMap = HashMultimap.create();
	}

	/**
	 * Plays one full game greedily with respect to the learned action values,
	 * then prints the final score and row count to stdout.
	 */
	public void play() {
		playEpisode(null);
	}

	/**
	 * Plays one full game, choosing each action via the given exploration
	 * strategy, then prints the final score and row count to stdout.
	 *
	 * NOTE(review): unlike the train methods, this does not call
	 * {@code strategy.setStateActionMap(...)}, so the strategy must already
	 * share this agent's map (e.g. from a prior training call) — confirm.
	 *
	 * @param strategy the exploration strategy used to pick actions
	 */
	public void play(ExploreStratergy strategy) {
		playEpisode(strategy);
	}

	/**
	 * Runs a single game until the goal score is reached.
	 *
	 * @param strategy action-selection strategy; {@code null} means play
	 *                 greedily with respect to the learned values
	 */
	private void playEpisode(ExploreStratergy strategy) {
		reset();

		throwDices();
		DobbelenState currentState = currentState();
		do {
			DobbelenAction currentAction = strategy == null
					? maxValueAction(currentState)
					: strategy.chooseAction(currentState);
			takeAction(currentState, currentAction);
			currentState = currentState();
		}
		while (totalScore < DobbelenGame.GOAL_SCORE);
		System.out.println("Total score: " + totalScore + " row: " + numRow);
	}

	/**
	 * Trains the agent with one-step Q-learning.
	 *
	 * Clears any previously learned values, then performs the given number of
	 * update steps, sharing the (fresh) state/action map with the strategy.
	 *
	 * @param strategy      exploration strategy used to pick actions
	 * @param numIterations number of TD update steps to perform
	 */
	public void trainQLearning(ExploreStratergy strategy, int numIterations) {
		reset();
		clearMemory();
		strategy.setStateActionMap(stateActionMap);

		throwDices();
		DobbelenState currentState = currentState();
		for (int i = 0; i < numIterations; i++) {
			DobbelenAction currentAction = strategy.chooseAction(currentState);
			int reward = takeAction(currentState, currentAction);
			DobbelenState nextState = currentState();
			// Off-policy target: the best action's value in the next state,
			// regardless of which action the strategy will actually choose.
			updateValue(currentAction, reward, maxValueAction(nextState).getValue());
			currentState = nextState;
		}
	}

	/**
	 * Trains the agent with one-step (on-policy) SARSA.
	 *
	 * @param strategy      exploration strategy used to pick actions
	 * @param numIterations number of TD update steps to perform
	 */
	public void trainSarsa(ExploreStratergy strategy, int numIterations) {
		reset();
		clearMemory();
		strategy.setStateActionMap(stateActionMap);

		throwDices();
		DobbelenState currentState = currentState();
		DobbelenAction currentAction = strategy.chooseAction(currentState);
		for (int i = 0; i < numIterations; i++) {
			int reward = takeAction(currentState, currentAction);
			DobbelenState nextState = currentState();
			DobbelenAction nextAction = strategy.chooseAction(nextState);
			// On-policy target: the value of the action actually chosen next.
			updateValue(currentAction, reward, nextAction.getValue());
			currentState = nextState;
			currentAction = nextAction;
		}
	}

	/**
	 * Applies the TD(0) update Q &lt;- Q + alpha * (r + gamma * target - Q)
	 * to the given action's stored value.
	 *
	 * @param action    the action whose value is updated
	 * @param reward    immediate reward r
	 * @param nextValue bootstrap target value of the successor state/action
	 */
	private void updateValue(DobbelenAction action, int reward, double nextValue) {
		double value = action.getValue()
				+ learningRate * (reward + discountRate * nextValue - action.getValue());
		action.setValue(value);
	}

	/**
	 * Trains the agent with SARSA(lambda) using replacing eligibility traces
	 * (the current action's trace is reset to 1, not accumulated).
	 *
	 * @param strategy      exploration strategy used to pick actions
	 * @param lambda        trace-decay parameter in [0, 1]
	 * @param numIterations number of TD update steps to perform
	 */
	public void trainSarsa(ExploreStratergy strategy, double lambda, int numIterations) {
		reset();
		clearMemory();
		strategy.setStateActionMap(stateActionMap);
		// Actions whose eligibility trace may still be non-zero; traces decay
		// geometrically but entries are never pruned from this set.
		Set<DobbelenAction> visitedActions = new HashSet<DobbelenAction>();

		throwDices();
		DobbelenState currentState = currentState();
		DobbelenAction currentAction = strategy.chooseAction(currentState);
		for (int i = 0; i < numIterations; i++) {
			int reward = takeAction(currentState, currentAction);
			DobbelenState nextState = currentState();
			DobbelenAction nextAction = strategy.chooseAction(nextState);
			double delta = reward + discountRate * nextAction.getValue() - currentAction.getValue();
			// Replacing trace: set to 1 rather than incrementing.
			currentAction.setEgibility(1.0);
			visitedActions.add(currentAction);

			// Propagate the TD error to every visited action in proportion to
			// its eligibility trace, then decay each trace by gamma * lambda.
			for (DobbelenAction visitedAction : visitedActions) {
				visitedAction.setValue(visitedAction.getValue()
						+ learningRate * delta * visitedAction.getEgibility());
				visitedAction.setEgibility(discountRate * lambda * visitedAction.getEgibility());
			}

			currentState = nextState;
			currentAction = nextAction;
		}
	}

	/**
	 * Returns the highest-valued action known for the given state.
	 *
	 * Note: {@link Collections#max} throws {@code NoSuchElementException} if
	 * the state has no actions; this is not expected because
	 * {@link #currentState()} seeds every new state with generated actions.
	 */
	private DobbelenAction maxValueAction(DobbelenState state) {
		Collection<DobbelenAction> actions = stateActionMap.get(state);
		return Collections.max(actions, DobbelenAction.DOBBELEN_ACTION_COMPARATOR);
	}

	/**
	 * Builds the state for the current dice, lazily registering its generated
	 * actions in the state/action map on first encounter.
	 */
	private DobbelenState currentState() {
		DobbelenState state = new DobbelenState(dices);
		if (!stateActionMap.containsKey(state)) {
			stateActionMap.putAll(state, state.generateActions());
		}
		return state;
	}

	/**
	 * Executes the given action in the given state and returns the immediate
	 * reward.
	 *
	 * NOTE(review): reward semantics inferred from the visible calls — the
	 * banked score when all dice are held, 0 when the re-throw yields no new
	 * score (bust), and the running score otherwise; confirm against the
	 * superclass/game rules.
	 */
	private int takeAction(DobbelenState state, DobbelenAction action) {
		List<Dice> heldDices = state.actionToDice(action);
		if (heldDices.size() == dices.size()) {
			// All dice held: bank the score, then start a fresh throw.
			updateScore();
			int previousScore = currentScore;
			throwDices();
			return previousScore;
		}
		else {
			reThrowDices(heldDices);
			if (DobbelenCondition.newDicesYeldNoScore(dices, heldDices)) {
				// Bust: the new dice score nothing, so restart with zero reward.
				throwDices();
				return 0;
			}
			else {
				return currentScore;
			}
		}

	}

	/** {@inheritDoc} Re-throws unless the best action holds every die. */
	@Override
	public boolean willReThrow() {
		DobbelenState currentState = currentState();
		DobbelenAction currentAction = maxValueAction(currentState);
		List<Dice> heldDices = currentState.actionToDice(currentAction);
		return heldDices.size() != dices.size();
	}

	/** {@inheritDoc} Holds the dice prescribed by the best learned action. */
	@Override
	public List<Dice> chooseDicesToHold() {
		DobbelenState currentState = currentState();
		DobbelenAction currentAction = maxValueAction(currentState);
		return currentState.actionToDice(currentAction);
	}

	/**
	 * Demo entry point: trains a 6-die agent with one-step SARSA under an
	 * epsilon-greedy strategy, then plays 1000 greedy games.
	 */
	public static void main(String[] args) {
		ExploreStratergy strategy = new EGreedyExploreStratergy(0.9);
		strategy.setUpdateStep(20000);
		strategy.setUpdateRate(0.95);

		DobbelenLearningAgent agent = new DobbelenLearningAgent(6);
		agent.trainSarsa(strategy, 1000000);
		for (int i = 0; i < 1000; i++) {
			agent.play();
		}
	}

}
