import numpy as np

class DoomWrapper(object):
    """Adapter giving a VizDoom-style environment a gym-like step API.

    Every method forwards to the wrapped environment; ``make_action``
    additionally packages the result as ``(next_state, reward, done, info)``.
    """

    def __init__(self, env):
        # Underlying VizDoom-style environment being wrapped.
        self.env = env

    def new_episode(self):
        """Start a fresh episode on the wrapped environment."""
        return self.env.new_episode()

    def get_state(self):
        """Forward to the wrapped environment's get_state()."""
        return self.env.get_state()

    def make_action(self, action):
        """Execute ``action`` and return ``(next_state, reward, done, None)``.

        Once the episode has finished no screen buffer is available, so an
        all-zero float32 frame of shape (3, 240, 320) stands in for the state.
        """
        reward = self.env.make_action(action)
        finished = self.env.is_episode_finished()
        if finished:
            next_state = np.zeros((3, 240, 320), dtype=np.float32)
        else:
            next_state = self.env.get_state().screen_buffer
        return next_state, reward, finished, None

    def get_total_reward(self):
        """Forward the wrapped environment's cumulative episode reward."""
        return self.env.get_total_reward()

    def is_episode_finished(self):
        """Forward the episode-finished flag."""
        return self.env.is_episode_finished()

    def close(self):
        """Close the wrapped environment."""
        return self.env.close()


class MaxAndSkipEnv(DoomWrapper):
    """Doom wrapper that repeats each action ``skip`` times and returns the
    element-wise max of the last two observed frames (max pooling across
    time steps, the standard de-flicker trick)."""

    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame.

        env  -- underlying Doom environment (VizDoom-style API)
        skip -- number of times each action is repeated per step (default 4)
        """
        DoomWrapper.__init__(self, env)
        # Two most recent raw observations (for max pooling across time
        # steps).  Allocated lazily from the first observed frame so its
        # shape and dtype always match the real screen buffer.
        # BUG FIX: the previous hard-coded shape (2, 32, 240) uint8 could
        # not hold the (3, 240, 320) frames produced elsewhere in this
        # file, so every frame assignment would raise a broadcast error.
        self._obs_buffer = None
        self._skip       = skip

    def new_episode(self):
        """Start a fresh episode on the wrapped environment."""
        return self.env.new_episode()

    def is_episode_finished(self):
        """Forward the episode-finished flag."""
        return self.env.is_episode_finished()

    def make_action(self, action):
        """Repeat action, sum reward, and max over last observations.

        Returns ``(max_frame, total_reward, done, None)``, mirroring
        DoomWrapper.make_action.
        """
        total_reward = 0.0
        done = False
        for i in range(self._skip):
            total_reward += self.env.make_action(action)
            done = self.is_episode_finished()
            if done:
                # BUG FIX: stop before touching get_state() — once the
                # episode is finished no state is available (see the same
                # guard in DoomWrapper.make_action), so dereferencing
                # .screen_buffer here would fail.
                break
            frame = np.asarray(self.env.get_state().screen_buffer)
            if self._obs_buffer is None:
                # Size the buffer to the actual frame on first use.
                self._obs_buffer = np.zeros((2,) + frame.shape, dtype=frame.dtype)
            if i == self._skip - 2:
                self._obs_buffer[0] = frame
            if i == self._skip - 1:
                self._obs_buffer[1] = frame
        if self._obs_buffer is None:
            # Episode ended before any frame was observed: fall back to the
            # same zero-frame terminal placeholder DoomWrapper uses.
            return np.zeros((3, 240, 320), dtype=np.float32), total_reward, done, None
        # Note that the observation on the done=True frame doesn't matter.
        max_frame = self._obs_buffer.max(axis=0)

        return max_frame, total_reward, done, None

    def reset(self, **kwargs):
        """Pass-through reset for environments exposing a gym-style reset()."""
        return self.env.reset(**kwargs)