# -*- coding: utf-8 -*-

# Copyright (c) 2010 Jérémie DECOCK (http://www.jdhp.org)

from naive_agent import NaiveAgent
import numpy as np
import random
import os

class SimpleEnvironment:
    """A tiny stochastic Markov Decision Process with 3 states and 2 actions.

    The environment repeatedly feeds the current percept (state, reward) to
    an agent, executes the action the agent chooses by sampling a successor
    state from a fixed transition model, and stops when a terminal state is
    reached.
    """

    # Class-level defaults; `agent` and `transition_model` are rebound per
    # instance in __init__.
    agent = None
    gui = None

    state_list = ["1", "2", "3"]
    action_list = ["left", "right"]

    initial_state = "1"
    terminal_state_list = ["3"]

    transition_model = None

    def __init__(self, agent):
        """Attach `agent` and build the transition probability table.

        transition_model[s, a, s'] holds P(s' | s, a), indexed by the
        positions of the states/actions in state_list and action_list.
        The agent is given the state and action vocabularies.
        """
        self.agent = agent
        self.agent.state_list  = self.state_list
        self.agent.action_list = self.action_list

        # Build the transition model
        self.transition_model = np.zeros([len(self.state_list),
                                          len(self.action_list),
                                          len(self.state_list)])
        # 1 -left->
        self.transition_model[0, 0, 0] = 0.1
        self.transition_model[0, 0, 1] = 0.8
        self.transition_model[0, 0, 2] = 0.1
        # 1 -right->
        self.transition_model[0, 1, 0] = 0.1
        self.transition_model[0, 1, 1] = 0.1
        self.transition_model[0, 1, 2] = 0.8
        # 2 -left->
        self.transition_model[1, 0, 0] = 0.1
        self.transition_model[1, 0, 1] = 0.1
        self.transition_model[1, 0, 2] = 0.8
        # 2 -right->
        self.transition_model[1, 1, 0] = 0.8
        self.transition_model[1, 1, 1] = 0.1
        self.transition_model[1, 1, 2] = 0.1
        # 3 -left->  (terminal state: absorbing)
        self.transition_model[2, 0, 2] = 1.0
        # 3 -right->
        self.transition_model[2, 1, 2] = 1.0

        self.check_transition_model()

    def run(self):
        """The main loop: percept/action cycle until a terminal state."""
        state = self.initial_state
        reward = None            # no reward before the first transition
        terminated = False

        while not terminated:
            action = self.agent.get_action(state, reward)
            former_state, state, terminated = self.execute_action(state, action)
            # BUG FIX: the original call self.reward(former_state, action,
            # state) did not match reward()'s one-argument signature and
            # raised a TypeError on the first iteration.  The reward is
            # computed from the state that was just reached.
            reward = self.reward(state, former_state, action)
            self.update_ui()

    def execute_action(self, state, action):
        """Sample a successor of `state` under `action`.

        Returns a (former_state, new_state, terminated) tuple where
        `terminated` is True when the new state is terminal.
        """
        state_index = self.state_list.index(state)
        action_index = self.action_list.index(action)

        probability_distribution = self.transition_model[state_index,
                                                         action_index]
        cumsum_distribution = probability_distribution.cumsum()

        # Inverse transform sampling: pick the first index whose cumulative
        # probability covers the uniform draw.  Stopping at the last index
        # also guards against the cumulative sum falling fractionally below
        # 1.0 because of floating point rounding (the original loop could
        # walk past the end and raise IndexError in that case).
        random_number = random.random()
        index = 0
        while (index < len(cumsum_distribution) - 1
               and random_number > cumsum_distribution[index]):
            index += 1

        former_state = state
        state = self.state_list[index]
        terminated = state in self.terminal_state_list
        return former_state, state, terminated

    def transition(self, src_state, action, dst_state):
        """Transition model.

        Return the probability to reach the state 'dst_state' if an action
        'action' is done in the state 'src_state'."""
        src_state_index = self.state_list.index(src_state)
        action_index    = self.action_list.index(action)
        dst_state_index = self.state_list.index(dst_state)
        return self.transition_model[src_state_index,
                                     action_index,
                                     dst_state_index]

    def check_transition_model(self):
        """Ensure every (state, action) row is a probability distribution.

        Raises ValueError if any row does not sum to 1.  A floating point
        tolerance is used: exact equality with 1.0 (as in the original
        check) can fail because of rounding in sums like 0.1 + 0.8 + 0.1.
        """
        prob_sum = self.transition_model.sum(2)
        if not np.allclose(prob_sum, 1.0):
            raise ValueError("Wrong transition model.")

    def reward(self, state, former_state=None, action=None):
        """Reward for reaching `state`.

        -1 when a terminal state is entered, 0.1 for every non-terminal
        step.  `former_state` and `action` are accepted (and currently
        unused) so that the reward function can later depend on the full
        (s, a, s') transition without changing callers.
        """
        if state in self.terminal_state_list:
            return -1
        return 0.1

    def update_ui(self):
        """Update the user interface (print the last state/action pair).

        Uses a single formatted string so the statement is valid and prints
        identically on both Python 2 and Python 3.
        """
        print("%s  >  %s" % (self.agent.state_history[-1],
                             self.agent.action_history[-1]))

    def draw_transition_model(self, filename='transition_model'):
        """Write the transition model as a Graphviz digraph and render it.

        Creates `filename`.dot then shells out to circo to produce
        `filename`.png.  NOTE: `filename` is interpolated into a shell
        command, so it must come from trusted input.
        """
        # Renamed from `file` (shadows the builtin); `with` guarantees the
        # handle is closed before circo reads the file.
        with open(filename + '.dot', 'w') as dot_file:
            dot_file.write('digraph transition_model {\n')

            # Declare state nodes
            dot_file.write('    node [shape=ellipse];\n')
            for state in self.state_list:
                dot_file.write('    "%s";\n' % (state))

            # Declare start state node
            dot_file.write('    "%s" [shape=Mcircle];\n' % self.initial_state)

            # Declare final state nodes
            for state in self.terminal_state_list:
                dot_file.write('    "%s" [shape=doublecircle];\n' % (state))

            # Declare state-action nodes
            dot_file.write('\n')
            dot_file.write('    node [shape=plaintext];\n')
            for state in self.state_list:
                for action in self.action_list:
                    dot_file.write('    "%s-%s" [label="%s"];\n' % (state, action, action))

            # Make links between source_states and actions
            dot_file.write('\n')
            for src_state in self.state_list:
                src_state_index = self.state_list.index(src_state)
                for action in self.action_list:
                    action_index = self.action_list.index(action)
                    dot_file.write('    "%s" -> "%s-%s" [arrowhead="none", style="bold"];\n' % (src_state, src_state, action))

                    for dst_state in self.state_list:
                        dst_state_index = self.state_list.index(dst_state)

                        probability = self.transition_model[src_state_index,
                                                            action_index,
                                                            dst_state_index]
                        if probability > 0:
                            dot_file.write('    "%s-%s" -> "%s" [label="%1.2f", fontsize="7", color="gray"];\n' % (src_state, action, dst_state, probability))

            dot_file.write('}')

        os.system('circo -Tpng %s.dot -o %s.png' % (filename, filename))


if __name__ == '__main__':
    # Demo entry point: run one full episode with the project-local
    # NaiveAgent (imported at the top of the file).
    environment = SimpleEnvironment(NaiveAgent())
    environment.run()

