from .memory import ReplayMemory
import torch

class Observer:
    """Base observer with logging callbacks for an environment interaction loop.

    Subclasses override the hooks they care about; the base implementation
    just prints what happened at each stage.
    """

    def __init__(self):
        pass

    def on_reset(self, state):
        """Called when the environment is reset with the initial state."""
        # BUG FIX: original used a plain string 'on_reset: {state}' (missing
        # the f prefix), printing the literal braces instead of the state.
        print(f'on_reset: {state}')

    def on_action(self, state, action, next_state, reward, done, info):
        """Called after each step with the full transition."""
        print(f'on_action: action = {action}, reward = {reward}, next_state = {next_state}')

    def on_done(self, steps):
        """Called when an episode finishes after `steps` steps."""
        print(f'on_done: finished after {steps} steps.')

class Count:
    """Observer that counts the number of actions taken in an episode.

    `on_reset` must run before `on_action` so that `count` exists;
    this matches the observer callback protocol used in this module.
    """

    def on_reset(self, state):
        """Start a fresh episode: zero the step counter."""
        self.count = 0

    def on_action(self, state, action, next_state, reward, done, info):
        """Record one more step taken."""
        self.count = self.count + 1


def _as_tensor(x):
    if isinstance(x, torch.Tensor):
        return x
    return torch.tensor(x)

class MemorizedObserver(Observer):
    """Observer that records each transition into a ReplayMemory.

    Construction arguments are forwarded verbatim to `ReplayMemory`.
    Inherits `on_done` (a logging print) from `Observer`.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): does not call super().__init__(); the base __init__
        # is a no-op, so behavior is unchanged.
        self.memory = ReplayMemory(*args, **kwargs)

    def on_reset(self, state):
        # Remember the episode's initial state.
        # NOTE(review): `self.state` is not read anywhere in this class —
        # presumably kept for external consumers; confirm before removing.
        self.state = state

    def on_action(self, state, action, next_state, reward, done, info):
        # Store the transition; only the reward is normalized to a tensor
        # here. `done` and `info` are intentionally not persisted.
        reward_tensor = _as_tensor(reward)
        self.memory.push(state, action, next_state, reward_tensor)
