'''
Total reward:  800.0
mean reward:  8.0
'''

import gymnasium as gym
import ale_py
import numpy as np

# Register the ALE (Atari Learning Environment) environment ids (e.g. 'ALE/MarioBros-v5')
# with Gymnasium so gym.make can resolve them.
gym.register_envs(ale_py)

if __name__ == "__main__":
    # Play ALE/MarioBros with uniformly random actions for a fixed number of
    # episodes and report the total and per-episode mean reward.
    # (Pass render_mode='human' to gym.make to watch the agent play.)
    env = gym.make('ALE/MarioBros-v5')

    total_reward = 0.0
    num_episodes = 1000  # fixed typo: was "episoid"

    for _ in range(num_episodes):
        # Bug fix: reset at the start of EVERY episode. The original code
        # reset only once before the outer loop, so after the first terminal
        # state it kept calling step() on an already-finished episode.
        state, info = env.reset()

        while True:
            # Sample a random action from the discrete action space.
            action = env.action_space.sample()

            # Single (non-vectorized) env: step() returns scalars, so plain
            # comparisons replace the old np.sum / np.any vector-env leftovers.
            state, reward, terminated, truncated, info = env.step(action)

            if reward != 0:
                total_reward += reward
                print("action: ", action)
                print("reward: ", reward)
                print("info: ", info)

            # Episode over (terminal state reached or time limit hit).
            if terminated or truncated:
                break

    print("Total reward: ", total_reward)
    print("mean reward: ", total_reward / num_episodes)

    # Close the environment
    env.close()