import gymnasium as gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from matplotlib import animation
from torch.distributions.normal import Normal


# Actor network with the same architecture as the training code
class Actor(nn.Module):
    """Gaussian policy network (architecture must match the training code).

    Maps a state vector to a per-dimension action mean squashed into
    [-1, 1] and a clamped standard deviation.
    """

    def __init__(self, state_dim, action_dim, hidden_dims=(128, 128)):
        """Build the network.

        Args:
            state_dim: size of the observation vector.
            action_dim: number of action dimensions.
            hidden_dims: widths of the hidden layers. A tuple default is
                used deliberately — a mutable list default would be shared
                across instances.
        """
        super().__init__()

        # Feature-extraction trunk: Linear -> ReLU -> LayerNorm per hidden layer.
        layers = []
        input_dim = state_dim
        for hidden_dim in hidden_dims:
            layers.append(nn.Linear(input_dim, hidden_dim))
            layers.append(nn.ReLU())
            layers.append(nn.LayerNorm(hidden_dim))
            input_dim = hidden_dim
        self.feature_extractor = nn.Sequential(*layers)

        # Separate output heads for the Gaussian mean and log standard deviation.
        self.mu_head = nn.Linear(hidden_dims[-1], action_dim)
        self.log_std_head = nn.Linear(hidden_dims[-1], action_dim)

    def forward(self, x):
        """Return (mu, std) for the given state batch.

        mu is squashed to [-1, 1] with tanh; log_std is clamped to
        [-20, 2] so std = exp(log_std) stays numerically well-behaved.
        """
        x = self.feature_extractor(x)
        mu = torch.tanh(self.mu_head(x))  # mean in [-1, 1]
        log_std = torch.clamp(self.log_std_head(x), -20, 2)  # keep std in a sane range
        std = torch.exp(log_std)
        return mu, std

    def get_action(self, state, deterministic=False):
        """Select an action for *state*.

        Returns:
            mu alone when deterministic; otherwise a tuple of
            (sampled action, log-prob, entropy), the latter two summed
            over the action dimensions.
        """
        mu, std = self.forward(state)
        if deterministic:
            return mu
        dist = Normal(mu, std)
        action = dist.sample()
        return action, dist.log_prob(action).sum(dim=-1), dist.entropy().sum(dim=-1)


def save_frames_as_gif(frames, filename="pendulum_ppo.gif"):
    """Save a list of RGB frames (H x W x 3 arrays) as an animated GIF.

    Args:
        frames: non-empty list of same-shaped image arrays.
        filename: output path; requires matplotlib's ImageMagick writer.
    """
    # Size the figure so one frame pixel maps to one output pixel at 72 dpi.
    plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi=72)
    patch = plt.imshow(frames[0])
    plt.axis("off")

    def animate(i):
        # FuncAnimation calls this once per frame index; swap in frame i.
        patch.set_data(frames[i])

    anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50)
    anim.save(filename, writer="imagemagick", fps=30)
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close()
    # Bug fix: the original f-string had no placeholder and printed a literal
    # "(unknown)" instead of the actual output path.
    print(f"Animation saved as {filename}")


def run_inference():
    """Load a trained PPO actor and run a few demo episodes on Pendulum-v1."""
    # Prefer GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Initialize the environment.
    # NOTE(review): with render_mode="human", env.render() returns None, so the
    # frames collected below would be unusable by save_frames_as_gif; switch to
    # render_mode="rgb_array" before re-enabling the commented-out GIF export.
    env = gym.make("Pendulum-v1", render_mode="human")
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    action_bound = env.action_space.high[0]  # Pendulum's action range is [-2, 2]

    # Create the actor network and load the trained weights.
    actor = Actor(state_dim, action_dim).to(device)
    actor.load_state_dict(
        torch.load("saved_models/ppo_actor_pendulum.pth", map_location=device)
    )
    actor.eval()  # evaluation mode

    # Run a few episodes for demonstration.
    num_episodes = 3
    max_steps = 1000
    all_rewards = []
    frames = []

    try:
        for episode in range(num_episodes):
            state = env.reset()[0]
            episode_reward = 0

            for step in range(max_steps):
                # Render the current frame; keep frames from the first episode only.
                frame = env.render()
                if episode == 0:
                    frames.append(frame)

                # Choose the action deterministically (policy mean) for evaluation.
                state_tensor = torch.FloatTensor(state).to(device)
                with torch.no_grad():
                    action = actor.get_action(state_tensor, deterministic=True)
                    # Defensive: the stochastic path returns a tuple.
                    if isinstance(action, tuple):
                        action = action[0]

                    # Rescale the tanh output from [-1, 1] to the env's range.
                    scaled_action = action * action_bound
                    scaled_action = scaled_action.cpu().numpy()

                # Step the environment.
                next_state, reward, terminated, truncated, _ = env.step(scaled_action)
                done = terminated or truncated

                episode_reward += reward
                state = next_state

                if done:
                    break

            all_rewards.append(episode_reward)
            print(f"Episode {episode + 1}, Reward: {episode_reward:.2f}")

        # # Save the animation
        # if frames:
        #     save_frames_as_gif(frames)

        print(f"Average reward over {num_episodes} episodes: {np.mean(all_rewards):.2f}")
    finally:
        # Always release the render window, even if an episode raised.
        env.close()


if __name__ == "__main__":
    run_inference()
