# DQN environment/agent for generating digit images pixel by pixel
import math
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from collections import namedtuple, deque
import os

# Training hyperparameters.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 128  # transitions per optimization step
GAMMA = 0.99  # discount factor for future rewards
# NOTE(review): EPS_START is unusually low for epsilon-greedy (0.9 is the
# common starting point); with EPS_END = 0.05 there is almost no exploration
# annealing — confirm this is intentional.
EPS_START = 0.1  # initial exploration probability
EPS_END = 0.05  # final exploration probability
EPS_DECAY = 1000  # exponential decay rate (in steps) for epsilon
TAU = 0.005  # soft-update rate for the target network
LR = 1e-4  # AdamW learning rate

# One replay-buffer entry: (state, action, reward, next_state, done).
Transition = namedtuple(
    "Transition", ("state", "action", "reward", "next_state", "done")
)


class AdditiveSpatialDQN_QNetwork(nn.Module):
    """Fully-convolutional encoder/decoder Q-network.

    Maps a state image of shape (N, 1, 28, 28) to a Q-value map of the
    same shape: each of the 28x28 output cells holds the Q-value of the
    per-pixel action at that location.
    """

    def __init__(self):
        super(AdditiveSpatialDQN_QNetwork, self).__init__()

        # Encoder (downsampling path): 28 -> 14 -> 7 -> 4
        self.enc_conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=1)
        self.enc_conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)
        self.enc_conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)

        # Bottleneck: collapses the 4x4 map to 1x1
        self.bottleneck = nn.Conv2d(128, 256, kernel_size=4)

        # Decoder (upsampling path): 1 -> 4 -> 7 -> 14 -> 28
        # NOTE: no skip connections; for better results the encoder outputs
        # could be added to the matching decoder inputs (U-Net style).
        self.dec_tconv1 = nn.ConvTranspose2d(256, 128, kernel_size=4)
        self.dec_tconv2 = nn.ConvTranspose2d(
            128, 64, kernel_size=3, stride=2, padding=1, output_padding=0
        )
        self.dec_tconv3 = nn.ConvTranspose2d(
            64, 32, kernel_size=3, stride=2, padding=1, output_padding=1
        )
        self.dec_tconv4 = nn.ConvTranspose2d(
            32, 16, kernel_size=3, stride=2, padding=1, output_padding=1
        )

        # 1x1 head: single output channel carrying the 28x28 grid of
        # per-pixel action Q-values -> (N, 1, 28, 28)
        self.output_layer = nn.Conv2d(16, 1, kernel_size=1)
        self.LeakyReLU = nn.LeakyReLU(0.01, inplace=True)

    def forward(self, x):
        """Run encode -> bottleneck -> decode and return the Q-value map."""
        activate = self.LeakyReLU
        stages = (
            self.enc_conv1,
            self.enc_conv2,
            self.enc_conv3,
            self.bottleneck,
            self.dec_tconv1,
            self.dec_tconv2,
            self.dec_tconv3,
            self.dec_tconv4,
        )
        for stage in stages:
            x = activate(stage(x))
        # No activation on the head: Q-values may be any real number.
        return self.output_layer(x)


class ReplayMemory:
    """Fixed-capacity FIFO buffer of transitions for experience replay."""

    def __init__(self, capacity):
        # A bounded deque silently evicts the oldest entry once full.
        self.memory = deque([], maxlen=capacity)

    def push(self, *args):
        """Store one transition; args are (state, action, reward, next_state, done)."""
        self.memory.append(Transition(*args))

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)


class DQNAgent:
    def __init__(self, image_dims=(28, 28), load_model_path=None):
        """Set up policy/target networks, optimizer and replay memory.

        Args:
            image_dims: (height, width) of the state image; default 28x28.
            load_model_path: optional checkpoint path; loaded into the
                policy net when the file exists.
        """
        self.image_h, self.image_w = image_dims
        self.num_actions = image_dims[0] * image_dims[1]  # total pixel actions (784)
        self.steps_done = 0  # drives epsilon decay in choose_action

        self.policy_net = AdditiveSpatialDQN_QNetwork().to(DEVICE)
        self.target_net = AdditiveSpatialDQN_QNetwork().to(DEVICE)

        if load_model_path and os.path.exists(load_model_path):
            print(f"--- Loading model from {load_model_path} ---")
            # BUGFIX: map_location lets a checkpoint saved on one device
            # (e.g. CUDA) load on another (e.g. a CPU-only machine);
            # without it torch.load raises when CUDA is unavailable.
            self.policy_net.load_state_dict(
                torch.load(load_model_path, map_location=DEVICE)
            )
        else:
            print("--- Initializing a new model ---")

        # Target net starts as an exact copy of the policy net.
        self.target_net.load_state_dict(self.policy_net.state_dict())

        self.policy_net.train()
        self.target_net.eval()  # target net is inference-only

        self.optimizer = optim.AdamW(self.policy_net.parameters(), lr=LR, amsgrad=True)
        self.memory = ReplayMemory(10000)

    def _preprocess_state(self, state):
        """Lift a single (H, W) state tensor to network shape (1, 1, H, W) on DEVICE."""
        # Prepend batch and channel axes, then move to the compute device.
        return state[None, None, ...].to(DEVICE)

    def _decode_action_index(self, action_index):
        """
        将一维的动作索引解码为环境需要的 (action_type, y, x) 元组。
        这个函数是 Agent 内部的辅助函数。
        """
        # ★★★ 关键逻辑：如何从 0-1567 的索引解码 ★★★
        # 假设我们约定：
        # 索引 0-783   是 action_type = 0 (减少)
        # 索引 784-1567 是 action_type = 1 (增加)

        # 动作类型
        if action_index < self.num_actions:  # num_actions 在这里是 784
            action_type = 1  # 增加
            pixel_index = action_index
        else:
            action_type = 0  # 减少
            pixel_index = action_index - self.num_actions

        # 像素坐标
        y = pixel_index // self.image_w
        x = pixel_index % self.image_w

        return (action_type, y, x)

    def choose_action(self, state):
        """Pick an "increase brightness" action for `state` (epsilon-greedy).

        With probability eps a random valid pixel is chosen (explore);
        otherwise the pixel with the highest predicted Q-value among the
        valid ones (exploit). A pixel is valid while its value is < 255.

        Args:
            state: 2-D tensor (H, W) of pixel values; assumes a 255.0
                intensity ceiling — TODO confirm against the environment.

        Returns:
            (action_type, y, x); action_type is always 1 (increase) here.
        """
        sample = random.random()
        eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(
            -1.0 * self.steps_done / EPS_DECAY
        )
        self.steps_done += 1

        # Pixels that can still be brightened.
        valid_mask = state < 255.0
        valid_increase_indices = valid_mask.nonzero(as_tuple=False)

        if len(valid_increase_indices) == 0:
            # No valid action left: return a default action to avoid a crash.
            return (1, 0, 0)

        if sample > eps_threshold:
            # --- Exploit ---
            with torch.no_grad():
                state_tensor = self._preprocess_state(state)
                q_values_map = self.policy_net(state_tensor)
                increase_q_map = q_values_map[0, 0, :, :]

                # Vectorized masking: invalid pixels get -inf so argmax only
                # considers valid ones. Replaces the original O(H*W) Python
                # loop over index pairs with a single tensor op.
                masked_q_map = torch.where(
                    valid_mask.to(increase_q_map.device),
                    increase_q_map,
                    torch.full_like(increase_q_map, -float("inf")),
                )

                # Coordinates of the best valid Q-value.
                flat_idx = torch.argmax(masked_q_map.flatten()).item()
                y = flat_idx // self.image_w
                x = flat_idx % self.image_w
                return (1, y, x)
        else:
            # --- Explore: uniformly random valid pixel ---
            random_idx = random.choice(valid_increase_indices)
            y, x = random_idx[0].item(), random_idx[1].item()
            return (1, y, x)

    def update(self):
        """Run one DQN optimization step on a sampled minibatch.

        Returns:
            The scalar Huber loss for the step, or None when the replay
            buffer does not yet hold BATCH_SIZE transitions.
        """
        if len(self.memory) < BATCH_SIZE:
            return None

        transitions = self.memory.sample(BATCH_SIZE)
        # Transpose list-of-Transitions into a Transition-of-lists.
        batch = Transition(*zip(*transitions))

        # 1. Batch tensors. States are stored as (H, W); add a channel dim.
        state_batch = torch.stack(batch.state).unsqueeze(1).to(DEVICE)
        reward_batch = torch.tensor(batch.reward, dtype=torch.float, device=DEVICE)
        done_mask = torch.tensor(batch.done, dtype=torch.bool, device=DEVICE)

        # Actions are stored as [B, 3] tensors of (action_type, y, x);
        # only the pixel location is needed to index the flat Q-map.
        # NOTE(review): assumes stored action tensors have an integer dtype
        # so gather() accepts the indices — confirm at the push() call site.
        action_tensors_batch = torch.stack(batch.action).to(DEVICE)
        y_coords = action_tensors_batch[:, 1]
        x_coords = action_tensors_batch[:, 2]
        pixel_indices = y_coords * self.image_w + x_coords
        action_batch = pixel_indices.unsqueeze(1)

        # 2. Q(s_t, a_t): flatten each 28x28 map and pick the taken action.
        q_maps = self.policy_net(state_batch)
        q_maps_flat = q_maps.view(BATCH_SIZE, -1)
        current_q_values = q_maps_flat.gather(1, action_batch)

        # 3. V(s_{t+1}) from the target net; terminal states keep value 0.
        next_state_values = torch.zeros(BATCH_SIZE, device=DEVICE)
        non_final_mask = ~done_mask

        non_final_list = [s for s, d in zip(batch.next_state, batch.done) if not d]
        # BUGFIX: torch.stack([]) raises on an empty list, so guard BEFORE
        # stacking — the original stacked first and crashed whenever every
        # transition in the batch was terminal.
        if non_final_list:
            non_final_next_states = (
                torch.stack(non_final_list).unsqueeze(1).to(DEVICE)
            )
            with torch.no_grad():
                next_q_maps = self.target_net(non_final_next_states)
                max_next_q_values = (
                    next_q_maps.view(len(non_final_list), -1).max(1).values
                )
                next_state_values[non_final_mask] = max_next_q_values

        # 4. Bellman targets.
        target_q_values = reward_batch + (GAMMA * next_state_values)

        # 5. Huber loss, backprop with gradient clipping.
        criterion = nn.SmoothL1Loss()
        loss = criterion(current_q_values, target_q_values.unsqueeze(1))

        self.optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
        self.optimizer.step()

        return loss.item()

    def update_target_net(self):
        """Soft-update the target network: θ_target ← τ·θ_policy + (1 − τ)·θ_target."""
        blended = self.target_net.state_dict()
        for key, policy_param in self.policy_net.state_dict().items():
            blended[key] = policy_param * TAU + blended[key] * (1 - TAU)
        self.target_net.load_state_dict(blended)