'''
Baseline: average score over 100 random-policy episodes — mean reward: 15.8
'''

import gymnasium as gym
from PIL import Image
from gymnasium import spaces
import ale_py
import numpy as np
import cv2
from lib import common

def save_state_as_image(state, filename):
    """Save an observation array as a PNG image.

    Args:
        state: NumPy array holding the observation. Float arrays are
            min-max scaled to [0, 255] and cast to uint8; singleton
            dimensions (e.g. (H, W, 1)) are squeezed out.
        filename: Destination path for the PNG file.
    """
    if state.dtype != np.uint8:
        lo = state.min()
        span = state.max() - lo
        if span == 0:
            # Constant image: the original min-max formula would divide
            # by zero (NaN -> invalid uint8 cast); map everything to 0.
            state = np.zeros_like(state, dtype=np.uint8)
        else:
            state = np.uint8(255 * (state - lo) / span)
    # Remove extra singleton dimensions so Pillow accepts the array.
    state = state.squeeze()
    img = Image.fromarray(state)
    # Pillow may infer modes such as 'F' or 'I' for odd inputs; convert
    # to grayscale so the result can always be written as a PNG.
    if img.mode not in ('L', 'RGB'):
        img = img.convert('L')
    img.save(filename)

class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper for ALE environments.

    Scales non-zero raw rewards by 1/10, charges a small penalty on every
    frame, and applies a penalty when a life is lost (mirrored as a bonus
    of the same magnitude when a life is gained).
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super().__init__(env)
        self.frame_penalty = frame_penalty          # added on every step
        self.life_loss_penalty = life_loss_penalty  # added when a life is lost (negative)
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Remember the starting life count so step() can detect changes.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        if reward != 0:
            reward /= 10.0  # scale down the raw game score

        # Bug fix: frame_penalty was stored but never applied. Charge it
        # on every step to discourage idling.
        reward += self.frame_penalty

        # Penalize a lost life; reward a gained life symmetrically.
        # life_loss_penalty is negative, so += punishes and -= rewards.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives
        elif current_lives > self.previous_lives:
            reward -= self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info

gym.register_envs(ale_py)

# Initialize the environment headless; pass render_mode="human" to watch,
# or wrap with RewardPenaltyWrapper to evaluate the shaped reward.
env = gym.make('ALE/BankHeist-v5')
print("max max_episode_steps: ", env.spec.max_episode_steps)

count_frame = 0
total_reward = 0
# Play a fixed number of episodes with a uniformly random policy to
# establish a baseline mean score.
num_episodes = 100
for _ in range(num_episodes):
    # gymnasium's reset() returns (observation, info)
    state, info = env.reset()
    while True:
        # Take a random action
        action = env.action_space.sample()

        # Get the next state, reward, done flag, and info from the environment
        state, reward, done, trunc, info = env.step(action)
        count_frame += 1  # bug fix: frame counter was never incremented
        total_reward += reward
        if reward != 0:
            print("action: ", action)
            print("reward: ", reward)
            print("info: ", info)

        # Episode ends on a terminal state or on truncation.
        if done or trunc:
            print("done or trunc:: ", done or trunc)
            print("info: ", info)
            print("count_frame: ", count_frame)
            break

print("Total reward: ", total_reward)
print("mean reward: ", total_reward / num_episodes)

# Close the environment
env.close()