import os
import torch
import numpy as np
import gymnasium as gym
from train import DQN_Agent
from config import (GAME_NAME, HIDDEN_DIM, DEVICE, LEARNING_RATE, TARGET_UPDATE, TAU,
                    FINAL_EPSILON, EPSILON_DECAY, DISCOUNT_FACTOR, DQN_TYPE)

# Create the environment with on-screen rendering so evaluation is visible;
# max_episode_steps=1500 caps each episode so it cannot run forever.
env = gym.make(GAME_NAME, render_mode="human", max_episode_steps=1500)
state_dim = env.observation_space.shape[0]  # assumes a flat Box observation space — confirm for GAME_NAME
action_dim = env.action_space.n  # assumes a Discrete action space

# Build the agent for evaluation. epsilon=0.0 disables exploration so the
# rollout shows the pure greedy policy; the remaining hyperparameters mirror
# training config but are irrelevant at inference time.
agent = DQN_Agent(
    state_dim=state_dim,
    hidden_dim=HIDDEN_DIM,
    action_dim=action_dim,
    device=DEVICE,
    learning_rate=LEARNING_RATE,
    target_update=TARGET_UPDATE,
    tau=TAU,
    epsilon=0.0,  # pure greedy policy for evaluation
    final_epsilon=FINAL_EPSILON,
    epsilon_decay=EPSILON_DECAY,
    discount_factor=DISCOUNT_FACTOR,
    dqn_type=DQN_TYPE,
)

# Load the previously trained model from results/<GAME_NAME>/dqn_model.pth.
results_dir = os.path.join("results", GAME_NAME)
os.makedirs(results_dir, exist_ok=True)
model_path = os.path.join(results_dir, "dqn_model.pth")
if not os.path.isfile(model_path):
    # Fail fast with an actionable message instead of an opaque torch load error.
    raise FileNotFoundError(
        f"No trained model found at '{model_path}'. Run training first."
    )
agent.load_model(model_path)

# Run a fixed number of greedy evaluation episodes, printing the return and
# step count of each, then a summary average (previously total_returns was
# collected but never used).
NUM_EVAL_EPISODES = 5
total_returns = []
for ep in range(NUM_EVAL_EPISODES):
    state, _ = env.reset()
    done = False
    episode_return = 0.0
    steps = 0
    while not done:
        # Greedy action: argmax over Q-values; no gradients needed at inference.
        with torch.no_grad():
            obs = torch.tensor(
                np.expand_dims(state, 0), dtype=torch.float32, device=DEVICE
            )
            action = agent.q_net(obs).argmax(dim=1).item()
        next_state, reward, terminated, truncated, _ = env.step(action)
        steps += 1
        # Episode ends on environment termination or the step-limit truncation.
        done = terminated or truncated
        episode_return += reward
        state = next_state
    total_returns.append(episode_return)
    print(f"Episode {ep + 1}: Return = {episode_return:.2f}")
    print(f"count:{steps}")
print(
    f"Average return over {NUM_EVAL_EPISODES} episodes: "
    f"{sum(total_returns) / len(total_returns):.2f}"
)
env.close()