# DQN environment/agent for generating digit images
import math
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from collections import namedtuple, deque
import os

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 128
GAMMA = 0.99
EPS_START = 0.3
EPS_END = 0.05
EPS_DECAY = 1000
TAU = 0.005
LR = 1e-4

Transition = namedtuple(
    "Transition", ("state", "action", "reward", "next_state", "done")
)


class QNetwork(nn.Module):
    """Encoder-decoder Q-network for the image-generation DQN.

    Input:  (N, 1, 28, 28) — batch of single-channel images.
    Output: (N, 2, 28, 28) — per-pixel Q-values for the two actions
            (decrease / increase the pixel's gray value).
    """

    def __init__(self):
        super(QNetwork, self).__init__()

        # Encoder: three stride-2 convolutions shrink 28x28 down to 4x4.
        self.enc_conv1 = nn.Conv2d(
            1, 32, kernel_size=3, stride=2, padding=1
        )  # 28x28 -> 14x14
        self.enc_conv2 = nn.Conv2d(
            32, 64, kernel_size=3, stride=2, padding=1
        )  # 14x14 -> 7x7
        self.enc_conv3 = nn.Conv2d(
            64, 128, kernel_size=3, stride=2, padding=1
        )  # 7x7 -> 4x4 (ceil of 3.5)

        # Bottleneck: collapse the 4x4 feature map to a single position.
        self.bottleneck = nn.Conv2d(128, 256, kernel_size=4)  # 4x4 -> 1x1

        # Decoder: transposed convolutions mirror the encoder back to 28x28.
        self.dec_tconv1 = nn.ConvTranspose2d(
            256, 128, kernel_size=4
        )  # 1x1 -> 4x4
        self.dec_tconv2 = nn.ConvTranspose2d(
            128, 64, kernel_size=3, stride=2, padding=1, output_padding=0
        )  # 4x4 -> 7x7
        self.dec_tconv3 = nn.ConvTranspose2d(
            64, 32, kernel_size=3, stride=2, padding=1, output_padding=1
        )  # 7x7 -> 14x14
        self.dec_tconv4 = nn.ConvTranspose2d(
            32, 16, kernel_size=3, stride=2, padding=1, output_padding=1
        )  # 14x14 -> 28x28

        # 1x1 projection down to the two per-pixel action Q-values.
        # Deliberately no activation here: Q-values can be any real number.
        self.output_layer = nn.Conv2d(16, 2, kernel_size=1)  # -> (N, 2, 28, 28)

    def forward(self, x):
        """Map a batch of images to a Q-value map of shape (N, 2, 28, 28)."""
        # Every hidden layer is followed by a ReLU; only the output layer
        # is left linear.
        hidden_layers = (
            self.enc_conv1,
            self.enc_conv2,
            self.enc_conv3,
            self.bottleneck,
            self.dec_tconv1,
            self.dec_tconv2,
            self.dec_tconv3,
            self.dec_tconv4,
        )
        for layer in hidden_layers:
            x = F.relu(layer(x))

        # Channels stay in the second dimension, per PyTorch convention.
        return self.output_layer(x)


class ReplayMemory:
    """Fixed-capacity FIFO buffer of Transition tuples for experience replay."""

    def __init__(self, capacity):
        # A bounded deque silently evicts the oldest entry once full.
        self.memory = deque(maxlen=capacity)

    def push(self, *args):
        """Store one transition; positional args follow the Transition fields."""
        self.memory.append(Transition(*args))

    def sample(self, batch_size):
        """Return batch_size transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)


class DQNAgent:
    """DQN agent that edits a grayscale image one pixel per step.

    Actions are triples (action_type, y, x): action_type 0 decreases and
    1 increases the gray value of pixel (y, x).
    """

    def __init__(self, image_dims=(28, 28), load_model_path=None):
        """Build policy/target networks, optimizer and replay memory.

        Args:
            image_dims: (height, width) of the generated image.
            load_model_path: optional checkpoint path; if it exists, the
                policy network weights are restored from it.
        """
        self.image_h, self.image_w = image_dims
        self.steps_done = 0  # global step counter, drives epsilon decay

        self.policy_net = QNetwork().to(DEVICE)
        # Resume from a checkpoint when available, otherwise start fresh.
        if load_model_path and os.path.exists(load_model_path):
            print(f"--- Loading model from {load_model_path} ---")
            # map_location keeps a checkpoint saved on GPU loadable on a
            # CPU-only machine (and vice versa); without it torch.load
            # raises a device error in that situation.
            self.policy_net.load_state_dict(
                torch.load(load_model_path, map_location=DEVICE)
            )
        else:
            print("--- Initializing a new model ---")

        self.policy_net.train()
        # Target network starts as an exact copy of the policy network and
        # is only ever updated via soft updates (update_target_net).
        self.target_net = QNetwork().to(DEVICE)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()  # target_net is used for evaluation only

        self.optimizer = optim.AdamW(self.policy_net.parameters(), lr=LR, amsgrad=True)
        self.memory = ReplayMemory(10000)

    def _preprocess_state(self, state):
        """Convert one (H, W) state tensor into a (1, 1, H, W) float batch on DEVICE."""
        return state.float().unsqueeze(0).unsqueeze(0).to(DEVICE)

    def choose_action(self, state):
        """Select an action via epsilon-greedy.

        Args:
            state: current image as an (H, W) tensor.

        Returns:
            (action_type, y, x) — action_type 0 decreases / 1 increases the
            gray value of pixel (y, x).
        """
        sample = random.random()
        # Epsilon decays exponentially from EPS_START toward EPS_END.
        eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(
            -1.0 * self.steps_done / EPS_DECAY
        )
        self.steps_done += 1

        if sample > eps_threshold:
            # --- Exploitation: pick the action with the highest Q-value ---
            with torch.no_grad():
                state_tensor = self._preprocess_state(state)

                # Q-value map from the policy network, shape (1, 2, 28, 28).
                q_values_map = self.policy_net(state_tensor)

                # Drop the batch dimension -> (2, 28, 28), then flatten so a
                # single argmax locates the best (action_type, y, x).
                flat_q_map = q_values_map[0].view(-1)
                flat_idx = torch.argmax(flat_q_map).item()

                # Convert the flat index back to (action_type, y, x).
                action_type = flat_idx // (self.image_h * self.image_w)
                remainder = flat_idx % (self.image_h * self.image_w)
                y = remainder // self.image_w
                x = remainder % self.image_w

                return (action_type, y, x)
        else:
            # --- Exploration: pick a uniformly random action ---
            action_type = random.randint(0, 1)  # 0: decrease, 1: increase gray value
            y = random.randint(0, self.image_h - 1)
            x = random.randint(0, self.image_w - 1)
            return (action_type, y, x)

    def update(self):
        """Sample a batch from replay memory and do one optimization step.

        Returns:
            The scalar loss value, or None when the memory does not yet
            hold a full batch.
        """
        if len(self.memory) < BATCH_SIZE:
            return None

        transitions = self.memory.sample(BATCH_SIZE)
        # Transpose a list of Transitions into a Transition of tuples.
        batch = Transition(*zip(*transitions))

        # --- 1. Build tensor batches ---
        # States are stored as (28, 28) tensors; add the channel dimension.
        state_batch = torch.stack(batch.state).float().unsqueeze(1).to(DEVICE)
        # NOTE(review): assumes actions were stored as length-3 int tensors
        # (action_type, y, x) — confirm against the training loop.
        action_batch = torch.stack(batch.action).to(DEVICE)  # Shape: [B, 3]
        reward_batch = torch.tensor(batch.reward, dtype=torch.float, device=DEVICE)
        next_state_batch = torch.stack(batch.next_state).float().unsqueeze(1).to(DEVICE)
        done_batch = torch.tensor(batch.done, dtype=torch.float, device=DEVICE)

        # --- 2. Q(s_t, a_t) ---
        # policy_net outputs Q-value maps of shape [B, 2, 28, 28].
        q_maps = self.policy_net(state_batch)

        # Advanced indexing picks, per sample, the Q-value of the action
        # actually taken: [batch, action_type, y, x].
        batch_indices = torch.arange(BATCH_SIZE, device=DEVICE)
        current_q_values = q_maps[
            batch_indices, action_batch[:, 0], action_batch[:, 1], action_batch[:, 2]
        ]

        # --- 3. max_a' Q_target(s_{t+1}, a') for all next states ---
        with torch.no_grad():
            next_q_maps = self.target_net(next_state_batch)  # -> [B, 2, 28, 28]
            # Flatten the action/H/W dimensions and take the per-sample max.
            max_next_q_values = next_q_maps.view(BATCH_SIZE, -1).max(1).values

        # --- 4. TD target: only the reward remains when the episode is done ---
        target_q_values = reward_batch + (GAMMA * max_next_q_values * (1 - done_batch))

        # --- 5. Huber loss between predicted and target Q-values ---
        criterion = nn.SmoothL1Loss()
        loss = criterion(current_q_values, target_q_values)

        # --- 6. Optimize, clipping gradient values for stability ---
        self.optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_value_(self.policy_net.parameters(), 100)
        self.optimizer.step()

        return loss.item()

    def update_target_net(self):
        """Soft-update the target network: theta' <- tau*theta + (1 - tau)*theta'."""
        target_net_state_dict = self.target_net.state_dict()
        policy_net_state_dict = self.policy_net.state_dict()
        for key in policy_net_state_dict:
            target_net_state_dict[key] = policy_net_state_dict[
                key
            ] * TAU + target_net_state_dict[key] * (1 - TAU)
        self.target_net.load_state_dict(target_net_state_dict)