﻿import numpy as np
from agent.Dqn_28_28.agent import DQNAgent
from env.env import Env
import torch
import os
import matplotlib.pyplot as plt
from PIL import Image

# Index of the current training run; used to build per-run output folders
# and to locate the previous run's checkpoint.
TrainingTimes = 8

# All artifacts of this run (logs, images, checkpoints) go here.
# Fix: dropped the pointless f-prefix on placeholder-free strings and replaced
# the check-then-create pattern with the race-free exist_ok form.
output_dir = os.path.join("./agent", "Dqn_28_28", str(TrainingTimes))
os.makedirs(output_dir, exist_ok=True)
log_file_path = os.path.join(output_dir, "training_log.txt")


def save_episode_image(state_tensor, episode, reward, loss, path):
    """Save an episode's final state tensor as a grayscale PNG.

    Args:
        state_tensor: state image tensor; assumed (H, W) or with singleton
            leading dims — TODO confirm against Env.reset()/step(). Float
            tensors are assumed to be in [0, 1] and are rescaled to 0-255.
        episode: episode number, used in the output filename.
        reward: total episode reward (currently unused; kept for API compat).
        loss: average episode loss (currently unused; kept for API compat).
        path: directory to write ``episode_<episode>.png`` into.
    """
    # 1. Make sure the target folder exists (race-free form).
    os.makedirs(path, exist_ok=True)

    # 2. Move to CPU and convert to NumPy; detach() guards against tensors
    # that still require grad.
    image_np = state_tensor.detach().cpu().numpy()

    # Drop any singleton batch/channel dims so Pillow receives a 2-D array.
    image_np = np.squeeze(image_np)

    # 3. PNG wants uint8 (0-255).  Bug fix: a float tensor in [0.0, 1.0] was
    # previously truncated to all 0s/1s by astype(); scale it to 0-255 first,
    # as the original comment already intended.
    if image_np.dtype != np.uint8:
        if np.issubdtype(image_np.dtype, np.floating):
            image_np = np.clip(image_np, 0.0, 1.0) * 255.0
        image_np = image_np.astype(np.uint8)

    # 4. Build a Pillow image; 'L' = 8-bit grayscale (luminance).
    img = Image.fromarray(image_np, mode="L")

    # 5. Write it out.
    img.save(os.path.join(path, f"episode_{episode}.png"))


NUM_EPISODES = 100
VISUALIZE_EVERY = 10  # save a state image every 10 episodes
SAVE_MODEL_EVERY = 50  # checkpoint the model every 50 episodes

env = Env(max_steps=1000)

# Resume from the final checkpoint of the previous run (run TrainingTimes-1).
# Fix: removed the f-prefix from placeholder-free string literals.
load_model_path = os.path.join(
    "./agent", "Dqn_28_28", str(TrainingTimes - 1), "q_network_episode_100.pth"
)
# load_model_path = None  # uncomment to train from scratch
agent = DQNAgent(
    image_dims=(28, 28),
    load_model_path=load_model_path,
)
for episode_idx in range(NUM_EPISODES):
    state = env.reset()
    done = False
    total_reward = 0
    episode_losses = []

    # Roll out one full episode, learning after every environment step.
    while not done:
        # Pick an action for the current state.
        action = agent.choose_action(state)

        # Apply it to the environment.
        next_state, reward, done, _ = env.step(action)
        total_reward += reward

        # Store the transition; state/next_state are already tensors,
        # the action tuple is converted to a long tensor for the buffer.
        agent.memory.push(
            state,
            torch.tensor(action, dtype=torch.long),
            reward,
            next_state,
            done,
        )
        state = next_state

        # One optimization step; update() returns None until the replay
        # buffer is warm enough to sample from.
        step_loss = agent.update()
        if step_loss is not None:
            episode_losses.append(step_loss)

    # Soft-update the target network once per episode.
    agent.update_target_net()

    # Summarize the episode; "N/A" when no learning step happened yet.
    if episode_losses:
        avg_loss = sum(episode_losses) / len(episode_losses)
        loss_str = f"Avg Loss: {avg_loss:.4f}"
    else:
        avg_loss = None
        loss_str = "Avg Loss: N/A"

    log_message = f"Episode {episode_idx + 1}/{NUM_EPISODES} | Total Reward: {total_reward:.2f} | {loss_str}"
    print(log_message)

    # Append the same line to the on-disk log.
    with open(log_file_path, "a") as f:
        f.write(log_message + "\n")

    episode_num = episode_idx + 1
    if episode_num % VISUALIZE_EVERY == 0:
        save_episode_image(state, episode_num, total_reward, avg_loss, output_dir)

    if episode_num % SAVE_MODEL_EVERY == 0:
        torch.save(
            agent.policy_net.state_dict(),
            os.path.join(output_dir, f"q_network_episode_{episode_num}.pth"),
        )

print(f"训练完成! 图像保存在 '{output_dir}' 文件夹中。")
