import gymnasium as gym
import ale_py
from PIL import Image
import os
import numpy as np
from typing import Any

class SkipNoChangeEnv(gym.Wrapper):
    """Repeat the same action until the observation changes.

    Unlike a fixed frame-skip, this wrapper keeps stepping the inner env with
    the same action while consecutive observations are pixel-identical, up to
    ``max_skip`` inner steps per outer ``step`` call.  This speeds up learning
    on stretches where the screen does not change between frames.
    """

    def __init__(self, env=None, max_skip=60):
        """
        Args:
            env: the environment to wrap.
            max_skip: upper bound on inner steps per outer ``step`` call.
                Must be >= 1 (the original ``while`` loop silently returned
                unbound locals when this was 0).
        """
        super(SkipNoChangeEnv, self).__init__(env)
        if max_skip < 1:
            raise ValueError("max_skip must be >= 1")
        self.pre_obs = None  # last observation handed back to the caller
        self.max_skip = max_skip
        self.cur_skip = 0  # kept for backward compatibility; always 0 between calls

    def step(self, action):
        """Step with `action` until the frame changes, the episode ends, or
        ``max_skip`` inner steps have run.

        Returns:
            (obs, total_reward, done, truncated, info) from the last inner
            step; rewards of all skipped frames are summed.
        """
        total_reward = 0.0
        # Bounded for-loop replaces the original while/self.cur_skip counter:
        # the counter was reset to 0 on every call, so it carried no state.
        for _ in range(self.max_skip):
            obs, reward, done, truncated, info = self.env.step(action)
            total_reward += reward
            if done or truncated:
                break
            # A changed frame means real progress -- stop skipping.
            if self.pre_obs is None or not np.array_equal(obs, self.pre_obs):
                break
        self.pre_obs = obs
        return obs, total_reward, done, truncated, info

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        """Clear the previous-frame cache and reset the inner env."""
        obs, info = self.env.reset(seed=seed, options=options)
        self.pre_obs = obs
        self.cur_skip = 0
        return obs, info

class RewardPenaltyWrapper(gym.Wrapper):
    """Shape the env's reward with penalties.

    Adds ``life_loss_penalty`` to the reward on any step where the ``lives``
    entry in ``info`` drops relative to the previous step.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        """
        Args:
            env: the environment to wrap.
            frame_penalty: NOTE(review): stored but never applied anywhere in
                this class -- confirm whether a per-step penalty was intended.
            life_loss_penalty: added to the reward when a life is lost.
        """
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty  # currently unused; see note above
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        """Reset the env and record the starting life count."""
        obs, info = self.env.reset(**kwargs)
        self.previous_lives = info.get('lives', 0)  # initial life count
        return obs, info

    def step(self, action):
        """Step the env and penalize losing a life."""
        obs, reward, done, truncated, info = self.env.step(action)

        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
        # Track lives unconditionally: the original updated only on decrease,
        # so a gain-then-loss sequence (e.g. 3 -> 4 -> 3) was never penalized.
        self.previous_lives = current_lives

        return obs, reward, done, truncated, info

gym.register_envs(ale_py)

# Build the environment with the ALE's own frameskip disabled (frameskip=1)
# so that SkipNoChangeEnv controls frame repetition itself.
# env = RewardPenaltyWrapper(gym.make('ALE/Blackjack-v5', render_mode="human"))
env = SkipNoChangeEnv(gym.make('ALE/Blackjack-v5', frameskip=1, render_mode="rgb_array"))

# reset() returns an (observation, info) pair -- unpack it.  The original
# bound the whole tuple to `state`, which only worked because `state` was
# overwritten by step() before first use.
state, info = env.reset()
total_reward = 0

# Play one episode with random actions, dumping each frame to disk.
for step in range(10000):
    action = env.action_space.sample()
    print("action: ", action)

    state, reward, done, trunc, info = env.step(action)
    total_reward += reward

    # Save the frame for offline inspection, tagged with step index and reward.
    img = Image.fromarray(state)
    img.save(os.path.join('./', f"step_{step:04d}_{reward}.png"))

    if reward != 0:
        print("reward: ", reward)
        print("info: ", info)

    # Episode over (terminated or truncated) -- stop the loop.
    if done or trunc:
        break

print("Total reward: ", total_reward)

# Close the environment
env.close()