import gym
from learning_to_adapt.envs.env_spec import EnvSpec
import collections
from learning_to_adapt.envs.base import Env
from learning_to_adapt.utils.serializable import Serializable

class BipedalWalkerEnv(Env, Serializable):
    """Project-Env wrapper around OpenAI Gym's ``BipedalWalker-v2``.

    Delegates stepping/reset/rendering to the underlying gym environment and
    adds a batched :meth:`reward` function usable by model-based planners.
    """

    def __init__(self, reset_every_episode=False):
        # quick_init must see the constructor arguments for Serializable.
        Serializable.quick_init(self, locals())
        self.reset_every_episode = reset_every_episode
        self.first = True
        self.env = gym.make("BipedalWalker-v2")
        self.observation = None  # last observation seen (set by step/reset)
        self.dt = 0.01  # timestep used for finite-difference velocity in reward()

    def step(self, action):
        """Advance one timestep; unwraps a singleton-batched action first."""
        if len(action) == 1:
            action = action[0]
        obs, rew, done, info = self.env.step(action)
        self.observation = obs
        return obs, rew, done, info

    def reward(self, obs, action, next_obs):
        """Batched reward: finite-difference velocity plus a small alive bonus.

        All three arguments are 2-D arrays with matching batch dimension.
        """
        assert obs.ndim == 2
        assert obs.shape == next_obs.shape
        assert obs.shape[0] == action.shape[0]
        # Control cost is deliberately disabled (would have been
        # 5e-3 * sum((action / scaling)^2) per sample).
        ctrl_cost = 0
        # NOTE(review): velocity is estimated from the -3rd observation
        # component — presumably a position-like feature; verify upstream.
        velocity = (next_obs[:, -3] - obs[:, -3]) / self.dt
        alive_bonus = 0.05
        return velocity - ctrl_cost + alive_bonus

    def reset(self):
        """Reset the wrapped env and cache/return the initial observation."""
        self.observation = self.env.reset()
        return self.observation

    @property
    def action_space(self):
        return self.env.action_space

    @property
    def observation_space(self):
        return self.env.observation_space

    def render(self):
        self.env.render()

    def log_diagnostics(self, paths):
        # No diagnostics are logged for this environment.
        pass

    @property
    def spec(self):
        return EnvSpec(
            observation_space=self.observation_space,
            action_space=self.action_space,
        )

    @property
    def horizon(self):
        # Episode length limit registered with gym for this env id.
        return self.env.spec.max_episode_steps

    def terminate(self):
        # Nothing to clean up.
        pass


if __name__ == '__main__':
    # Smoke test: drive the walker with random actions and render forever.
    import time

    env = BipedalWalkerEnv()
    while True:
        env.reset()
        for _ in range(1000):
            obs, rew, done, info = env.step(env.action_space.sample())
            env.render()
            print(obs, rew, done, info)
            if done:
                # Pause briefly so the terminal state is visible, then restart.
                time.sleep(1)
                env.reset()
