import gymnasium as gym
# Import necessary libraries
from matplotlib import animation, pyplot as plt

import torch
import torch.nn as nn
import torch.nn.functional as F

# Select the compute device: prefer CUDA when available, else fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Dueling Network (shared features, separate value and advantage streams)
class DQN(nn.Module):
    """Dueling Q-network.

    A shared linear feature layer feeds two separate streams — a scalar
    state-value stream V(s) and a per-action advantage stream A(s, a) —
    which are recombined into Q-values in ``forward``.
    """

    def __init__(self, n_observations: int, n_actions: int, hidden_num: int):
        super().__init__()

        self.n_observations = n_observations
        self.n_actions = n_actions

        # Shared feature extractor.
        self.features_layer = nn.Linear(n_observations, hidden_num)

        # Advantage stream: hidden layer then one output per action.
        self.advantage_hidden_layer = nn.Linear(hidden_num, hidden_num)
        self.advantage_layer = nn.Linear(hidden_num, n_actions)

        # Value stream: hidden layer then a single scalar output.
        self.value_hidden_layer = nn.Linear(hidden_num, hidden_num)
        self.value_layer = nn.Linear(hidden_num, 1)

    def forward(self, x):
        """Return Q-values of shape (batch, n_actions).

        Accepts either a single state or a batch. Combines the streams as
        Q = V + A - mean(A), which pins the advantages to zero mean and
        makes the value/advantage decomposition identifiable.
        """
        features = F.relu(self.features_layer(x))

        hidden_adv = F.relu(self.advantage_hidden_layer(features))
        adv = self.advantage_layer(hidden_adv)

        hidden_val = F.relu(self.value_hidden_layer(features))
        val = self.value_layer(hidden_val)

        return val + adv - adv.mean(dim=-1, keepdim=True)
    
    
def select_action(state, net=None):
    """Greedily pick the action with the highest predicted Q-value.

    Args:
        state: batched observation tensor of shape (1, n_observations).
        net: Q-network to query; defaults to the module-level
            ``policy_net`` loaded in ``__main__`` (backward compatible
            with the original single-argument call).

    Returns:
        A (1, 1) int64 tensor holding the greedy action index.
    """
    if net is None:
        net = policy_net  # fall back to the global used by the script
    with torch.no_grad():
        # .max(1) returns (values, indices) over the action dimension;
        # the indices are the greedy action for each row.
        return net(state).max(1).indices.view(1, 1)


def display_frames_as_gif(frames, save_path=None, fps=25):
    """Animate a list of RGB frames and save them as an MP4 video.

    Despite the historical name, the output is an MP4 written with the
    ffmpeg writer, not a GIF.

    Args:
        frames: non-empty list of HxWx3 RGB arrays (as returned by
            ``env.render()`` in ``rgb_array`` mode).
        save_path: output file path; defaults to
            ``{HOME}/videos/best_{env_name}_animation.mp4`` using the
            module-level globals set in ``__main__``.
        fps: frames per second of the written video.

    Raises:
        ValueError: if ``frames`` is empty.
    """
    if not frames:
        raise ValueError("frames must contain at least one frame")
    if save_path is None:
        save_path = f'{HOME}/videos/best_{env_name}_animation.mp4'

    fig, ax = plt.subplots()
    patch = ax.imshow(frames[0])
    plt.axis('off')

    def animate(i):
        # Swap in frame i; imshow keeps the same axes and extent.
        patch.set_data(frames[i])

    # interval derives from fps so on-screen playback matches the saved file.
    anim = animation.FuncAnimation(
        fig, animate, frames=len(frames), interval=1000 / fps, repeat=False
    )

    # Ensure the output directory exists before ffmpeg tries to write.
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    writervideo = animation.FFMpegWriter(fps=fps)
    anim.save(save_path, writer=writervideo)
    plt.close(fig)  # release the figure; this function is save-only

import os
if __name__ == '__main__':

    # Load network parameters from the saved checkpoint file.
    HOME = os.path.dirname(os.path.realpath(__file__))
    env_name = "CartPole-v1"
    # map_location keeps loading working even when the checkpoint was saved
    # on a GPU but this machine only has a CPU (or vice versa).
    checkpoint = torch.load(f'{HOME}/models/best_{env_name}_model.pt',
                            map_location=device)
    print(checkpoint.keys())

    # Build the environment; rgb_array mode lets us capture frames for video.
    env = gym.make(f"{env_name}", max_episode_steps=500, render_mode='rgb_array')

    # The action / observation space sizes drive the network dimensions.
    n_actions = env.action_space.n
    n_observations = env.observation_space.shape[0]

    policy_net = DQN(n_observations, n_actions, hidden_num=128).to(device)
    policy_net.load_state_dict(checkpoint['policy_net'])
    policy_net.eval()  # inference only

    frames = []
    score = 0
    done = False
    # Initialize the environment and get its starting state.
    state, info = env.reset()
    state = torch.tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
    while not done:
        frames.append(env.render())
        action = select_action(state)
        observation, reward, terminated, truncated, info = env.step(action.item())

        # An episode ends when the task terminates or the step limit truncates it.
        done = terminated or truncated
        # Move to the next state.
        state = torch.tensor(observation, dtype=torch.float32,
                             device=device).unsqueeze(0)
        score += reward

    print("score: ", score)
    env.close()

    # Save the collected frames as a video of this greedy rollout.
    display_frames_as_gif(frames)
