import argparse
import os
import time

import gymnasium as gym
import numpy as np
import torch

from models import PolicyNetwork

# Hard cap on steps per episode, passed to gym.make as max_episode_steps.
max_steps = 1000


def parse_args():
    """Return parsed command-line options for the visualizer.

    Options: --model_path, --episodes, --render_mode, --delay, --seed.
    """
    p = argparse.ArgumentParser(description="Visualize trained pendulum policy")
    p.add_argument("--model_path", type=str,
                   default="saved_models/vpg_pendulum.pth",
                   help="Path to the saved model weights")
    p.add_argument("--episodes", type=int, default=5,
                   help="Number of episodes to run")
    p.add_argument("--render_mode", type=str, default="human",
                   choices=["human", "rgb_array"],
                   help="Rendering mode")
    p.add_argument("--delay", type=float, default=0.02,
                   help="Delay between frames in seconds")
    p.add_argument("--seed", type=int, default=42, help="Random seed")
    return p.parse_args()


def _load_policy(model_path, state_dim, action_dim, device):
    """Build the policy network and load saved weights onto *device*.

    Returns the network in eval mode, or None (after printing an error)
    when the weights file does not exist.
    """
    policy = PolicyNetwork(state_dim, action_dim)  # action_bound is applied outside the net

    if not os.path.exists(model_path):
        print(f"Error: Model file {model_path} does not exist.")
        return None

    # map_location ensures weights load correctly regardless of the device
    # they were saved on (e.g. GPU checkpoint loaded on a CPU-only machine).
    state_dict = torch.load(model_path, map_location=device)
    policy.load_state_dict(state_dict)
    policy.to(device)
    policy.eval()  # evaluation mode for inference
    return policy


def _run_episode(env, policy, action_bound, device, seed, delay):
    """Roll out one deterministic episode and return (total_reward, steps).

    seed:  per-episode reset seed for reproducibility.
    delay: seconds to sleep between steps (0 disables frame pacing).
    """
    state = env.reset(seed=seed)[0]
    total_reward = 0
    steps = 0
    done = False

    while not done:
        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).to(device)
            # Deterministic inference for best evaluation performance.
            # NOTE(review): assumes get_action(deterministic=True) returns a
            # bare action compatible with env.step (the stochastic path would
            # return an (action, log_prob) pair) — confirm against models.py.
            action = policy.get_action(state_tensor, deterministic=True)
            # Scale the policy output to the environment's action range.
            scaled_action = action * action_bound

        next_state, reward, terminated, truncated, _ = env.step(scaled_action)
        done = terminated or truncated

        total_reward += reward
        steps += 1
        state = next_state

        # Pace rendering so humans can follow the rollout.
        if delay > 0:
            time.sleep(delay)

    return total_reward, steps


def main():
    """Load a trained pendulum policy and visualize evaluation rollouts.

    Reads CLI options (model path, episode count, render mode, frame delay,
    seed), restores the policy weights, and runs the requested number of
    deterministic episodes, printing per-episode statistics.
    """
    args = parse_args()

    # Seed torch and numpy for reproducible rollouts.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Prefer GPU for inference when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device} for inference")

    # Create the environment with a fixed episode-length cap.
    env = gym.make(
        "Pendulum-v1", render_mode=args.render_mode, max_episode_steps=max_steps
    )
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    action_bound = env.action_space.high[0]  # Pendulum-v1 actions span [-2, 2]

    policy = _load_policy(args.model_path, state_dim, action_dim, device)
    if policy is None:
        return

    print(f"Model loaded from {args.model_path}")
    print(f"Running {args.episodes} episodes...")

    for episode in range(args.episodes):
        print(f"\nEpisode {episode + 1}/{args.episodes}")
        total_reward, steps = _run_episode(
            env,
            policy,
            action_bound,
            device,
            seed=args.seed + episode,  # vary the seed so episodes differ
            # Original code slept only in "human" mode; a zero delay skips it.
            delay=args.delay if args.render_mode == "human" else 0.0,
        )
        print(
            f"Episode {episode + 1} finished after {steps} steps with reward {total_reward:.2f}"
        )

    env.close()
    print("\nVisualization completed!")


# Script entry point: run the visualizer only when executed directly.
if __name__ == "__main__":
    main()
