'''
Random-agent script for ALE/DonkeyKong-v5 (gymnasium + ale_py).

Defines an 84x84 grayscale observation wrapper and a reward-shaping
wrapper, then plays episodes with uniformly random actions and reports
the total and mean reward.
'''

import gymnasium as gym
import ale_py
from PIL import Image
from gymnasium import spaces
import numpy as np
import cv2


def save_state_as_image(state, filename):
    """Save a game-state array to disk as a PNG image.

    Parameters
    ----------
    state : np.ndarray
        Frame data of any numeric dtype. Non-uint8 input is min-max
        scaled to [0, 255]; singleton dimensions (e.g. (84, 84, 1))
        are squeezed away before conversion.
    filename : str
        Destination path for the PNG file.
    """
    # Ensure the state is a NumPy array with dtype uint8
    if state.dtype != np.uint8:
        # Min-max scale to [0, 255]. Fix: guard against a constant
        # frame, where max == min made the original divide by zero.
        span = state.max() - state.min()
        if span == 0:
            state = np.zeros(state.shape, dtype=np.uint8)
        else:
            state = np.uint8(255 * (state - state.min()) / span)
    # Remove extra dimensions so PIL can infer the image mode
    state = state.squeeze()
    img = Image.fromarray(state)
    # PIL cannot save exotic modes as PNG directly; force grayscale
    if img.mode not in ('L', 'RGB'):
        img = img.convert('L')
    img.save(filename)


# Global debug counter: incremented once per frame processed by
# ProcessFrame84.process and printed at episode end in the script below.
count_frame = 0

class ProcessFrame84(gym.ObservationWrapper):
    """Observation wrapper that converts raw RGB frames to 84x84 grayscale.

    Each observation from the wrapped environment is reduced to a single
    luminance channel and resized to 84x84, the classic DQN input shape.
    """

    def __init__(self, env=None):
        super(ProcessFrame84, self).__init__(env)
        # Advertise the new observation space: a single-channel 84x84
        # image with pixel values in 0..255.
        self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def observation(self, obs):
        """Transform one raw observation into the 84x84x1 format."""
        return ProcessFrame84.process(obs)

    @staticmethod
    def process(img):
        """Grayscale-convert, resize, and reshape a single RGB frame."""
        global count_frame
        # Luma conversion with ITU-R BT.601-style channel weights.
        red, green, blue = img[:, :, 0], img[:, :, 1], img[:, :, 2]
        gray = red * 0.299 + green * 0.587 + blue * 0.114
        resized = cv2.resize(gray, (84, 84), interpolation=cv2.INTER_AREA)
        # save_state_as_image(resized, r'D:\Projects\Python\my_-nqd\state_image.png')
        frame = np.reshape(resized, [84, 84, 1])
        count_frame += 1
        return frame.astype(np.uint8)


class RewardPenaltyWrapper(gym.Wrapper):
    """Reward-shaping wrapper: scales raw rewards, charges a small
    per-step penalty, and penalizes life loss (mirrored bonus on gain).

    Parameters
    ----------
    env : gym.Env
        Environment to wrap; expected to report 'lives' in `info`.
    frame_penalty : float
        Reward added on every step (negative by default) to discourage
        stalling.
    life_loss_penalty : float
        Reward added when a life is lost (negative by default); its
        negation is added when a life is gained.
    """

    def __init__(self, env, frame_penalty=-0.1, life_loss_penalty=-10):
        super(RewardPenaltyWrapper, self).__init__(env)
        self.frame_penalty = frame_penalty
        self.life_loss_penalty = life_loss_penalty
        self.previous_lives = 0

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        # Record the starting life count so step() can detect changes.
        self.previous_lives = info.get('lives', 0)
        return obs, info

    def step(self, action):
        obs, reward, done, truncated, info = self.env.step(action)

        # Down-scale non-zero raw rewards.
        # NOTE(review): floor division quantizes the reward (e.g. 5 -> 0);
        # confirm this is intended rather than true division (reward /= 10).
        if reward != 0:
            reward //= 10

        # Fix: frame_penalty was stored by __init__ but never used;
        # apply it on every step as the parameter name implies.
        reward += self.frame_penalty

        # Penalize losing a life; grant the mirrored bonus for gaining one.
        current_lives = info.get('lives', self.previous_lives)
        if current_lives < self.previous_lives:
            reward += self.life_loss_penalty
            self.previous_lives = current_lives
        elif current_lives > self.previous_lives:
            reward -= self.life_loss_penalty
            self.previous_lives = current_lives

        return obs, reward, done, truncated, info

# Make the ALE environments (e.g. ALE/DonkeyKong-v5) known to gymnasium.
gym.register_envs(ale_py)

# Initialize the environment
# env = gym.make('ALE/DonkeyKong-v5', render_mode='human')
env = gym.make('ALE/DonkeyKong-v5')
# env = gym.make('ALE/DonkeyKong-v5', render_mode='rgb_array', frameskip=4, repeat_action_probability=0.0)
print("max max_episode_steps: ", env.spec.max_episode_steps)

# Play `episoid` episodes with a uniformly random policy and report
# total / mean reward across all of them.
total_reward = 0
episoid = 100
for _ in range(episoid):
    # Fix: gymnasium reset() returns (observation, info); the original
    # left `state` bound to the whole tuple.
    state, info = env.reset()
    while True:
        # Take a random action
        # env.render()
        action = env.action_space.sample()

        # Step the environment: observation, reward, terminated,
        # truncated, and the info dict.
        state, reward, done, trunc, info = env.step(action)
        total_reward += reward
        if reward != 0:
            print("action: ", action)
            print("reward: ", reward)
            print("info: ", info)

        # Episode ends on termination or truncation.
        if done or trunc:
            print("done or trunc:: ", done or trunc)
            print("info: ", info)
            print("count_frame: ", count_frame)
            break

print("Total reward: ", total_reward)
print("mean reward: ", total_reward / episoid)

# Close the environment
env.close()