#!/usr/bin/env python3
import argparse
import gymnasium as gym
import os
from PIL import Image
import numpy as np
from collections import deque
from typing import Any
import lib.common as common
ENV_ID = "Pendulum-v1"

class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action for up to `skip` frames and max-pool observations.

    Purpose:
    - Frame skipping: execute the same action for `skip` consecutive frames
      instead of acting on every frame. This speeds up learning because the
      environment state usually changes little between adjacent frames.
    - Max pooling: merge the last two raw frames element-wise via ``np.max``.
      This works around the Atari flicker problem where some sprites only
      appear on alternate frames.
    """

    def __init__(self, env=None, skip=3):
        """Return only every `skip`-th frame."""
        super(MaxAndSkipEnv, self).__init__(env)
        # Most recent raw observations (maxlen=2 → pool over the last 2 frames).
        self._obs_buffer = deque(maxlen=2)
        # How many times the incoming action is repeated per step() call.
        self._skip = skip

    def step(self, action):
        """Repeat `action` up to `skip` times and sum the rewards.

        Stops early as soon as the episode terminates *or* is truncated;
        returns the element-wise max over the buffered observations together
        with the accumulated reward and the last (done, truncated, info).
        """
        total_reward = 0.0
        # Initialize to safe defaults so a `skip == 0` configuration cannot
        # raise NameError and the flags are proper booleans, not None.
        done = False
        truncated = False
        obs, info = None, {}
        # Repeat the same action up to `skip` times.
        for _ in range(self._skip):
            obs, reward, done, truncated, info = self.env.step(action)
            # Keep the most recent (up to maxlen=2) raw observations.
            self._obs_buffer.append(obs)
            # Accumulate the per-frame rewards.
            total_reward += reward
            # BUGFIX: also break on truncation — in the Gymnasium API,
            # calling step() after truncation is undefined behavior, so the
            # loop must stop on either end-of-episode signal.
            if done or truncated:
                break

        # Combine the buffered frames into a single observation:
        # element-wise maximum over the last (up to two) raw frames, which
        # mitigates sprite flicker in Atari-style environments.
        max_frame = np.max(np.stack(self._obs_buffer), axis=0)

        return max_frame, total_reward, done, truncated, info

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Clear past frame buffer and init. to first obs. from inner env."""
        # Drop stale frames from the previous episode before re-seeding.
        self._obs_buffer.clear()
        obs, info = self.env.reset(seed=seed, options=options)
        self._obs_buffer.append(obs)
        return obs, info

if __name__ == "__main__":

    # Build the wrapped Pendulum environment, rendering to RGB arrays.
    # env = MaxAndSkipEnv(gym.make(ENV_ID, g=9.81, render_mode="rgb_array"))
    env = common.wrapper_env(gym.make(ENV_ID, g=9.81, render_mode='rgb_array'))

    obs, _ = env.reset()
    reward_sum = 0.0
    step_count = 0
    episode_over = False
    # Drive one full episode with uniformly random actions.
    while not episode_over:
        random_action = env.action_space.sample()  # Sample random action
        obs, reward, done, trunc, info = env.step(random_action)
        episode_over = done or trunc
        reward_sum += reward
        step_count += 1
        # o = env.render()
        # img = Image.fromarray(o)
        # img.save("img_%05d.png" % step_count)
        print("reward: ", reward)
        print("info: ", info)
    print("In %d steps we got %.3f reward" % (step_count, reward_sum))
