# collect_train_without_dash.py
import os
import cv2
import mss
import time
import h5py
import torch
import random
import threading
import numpy as np
import pydirectinput
import torch.nn as nn
from collections import deque
from typing import Tuple, List
import torch.nn.functional as F
import matplotlib.pyplot as plt
from Activate_Window import activate_window
from torchvision.models import efficientnet_b0
from config import (FPS, DURATION, KNIGHT_HP_THRESHOLD, REWARD_RANGE, CHECK_TIME,WRITE_IN_FPS, DEVICE, MAX_EPISODES,
                    CAPTURE_WINDOW, KNIGHT_HP_WINDOW, BOSS_HP_WINDOW,BATTLE_WINDOW,
                    TRAJECTORY_SAVE_PATH, MODEL_PATH, REWARD_LOG_PATH,
                    ATOMIC_ACTIONS,
                    LR, EPOCHS_PER_BATCH, GAMMA,
                    MAX_TRAJECTORY, TARGET_UPDATE, EPOCH, BATCH_SIZE, TAU, JUMP_COOLDOWN)

# Global action dict: shared state read by the worker threads below and
# written by the agent inside main(). Each value is a small int sub-action.
current_action = {
    'ad'    : 0, # 0: none, 1: a, 2: d
    'space' : 0, # 0: up, 1: down
    'attack': 0, # 0: j, 1: wjw, 2: sjs
}
# Reentrant lock protecting reads/writes of current_action across threads.
action_lock = threading.RLock()

# Encode the three sub-actions into one discrete action id.
def encode_action(ad, space, attack) -> int:
    """Pack (ad, space, attack) into a single id in 0-17; call as encode_action(*action_list)."""
    return (ad * 2 + space) * 3 + attack

# Decode a discrete action id back into its sub-actions.
def decode_action(action_id) -> list:
    """Inverse of encode_action: split an id in 0-17 into [ad, space, attack]."""
    ad, remainder = divmod(action_id, 6)
    space, attack = divmod(remainder, 3)
    return [ad, space, attack]

# Shutdown flag: set() makes every action thread exit its loop promptly.
shutdown_event = threading.Event() # starts cleared (False)

# Thread controlling horizontal movement (verified working).
def ad_thread():
    """ 0 = stop, 1 = left ('a'), 2 = right ('d'); only acts on state changes. """
    last_ad = 0
    while not shutdown_event.is_set():
        with action_lock:
            ad = current_action['ad']
        if ad != last_ad:
            if ad == 1:
                pydirectinput.keyDown('a')
                if last_ad == 2:
                    pydirectinput.keyUp('d')
            elif ad == 2:
                pydirectinput.keyDown('d')
                if last_ad == 1:
                    pydirectinput.keyUp('a')
            else: # ad == 0: release whichever direction key was held
                if last_ad == 1:
                    pydirectinput.keyUp('a')
                else:
                    pydirectinput.keyUp('d')
            last_ad = ad
        time.sleep(CHECK_TIME)

# TODO: revise the jump logic (the commented-out hold-style space_thread below is the old version)
# def space_thread():
#     """ 控制 space 跳跃 """
#     last_space = 0
#     while not shutdown_event.is_set():
#         with action_lock:
#             space = current_action['space']
#         if space != last_space:
#             if space == 1:
#                 pydirectinput.keyDown('space')
#             else:
#                 pydirectinput.keyUp('space')
#             last_space = space
#         time.sleep(CHECK_TIME)

# Jump scheme #2: a fixed-length space press, rate-limited by a cooldown.
# Global timestamp of the last jump, shared across calls of space_thread.
last_jump_time = 0
def space_thread():
    """Press space for ~0.5s (near-max jump height) whenever requested, at most once per JUMP_COOLDOWN."""
    global last_jump_time
    while not shutdown_event.is_set():
        with action_lock:
            space = current_action['space']
        current_time = time.time()
        if space == 1 and (current_time - last_jump_time) > JUMP_COOLDOWN:
            last_jump_time = time.time()
            pydirectinput.keyDown('space')
            time.sleep(0.5)  # close to a full-height jump
            pydirectinput.keyUp('space')
        # If space == 0 do nothing (the press above is momentary).
        time.sleep(CHECK_TIME)

# Attack thread; the requested attack type is deliberately NOT consumed,
# so the same attack repeats every cycle until the agent changes it.
def attack_thread():
    """ Repeatedly perform the attack action (j / w+j / s+j). """
    while not shutdown_event.is_set():
        attack_type = 0
        with action_lock:
            attack_type = current_action['attack']
            # current_action['attack'] = 0 # not consumed: attack repeats until changed
        if attack_type == 0:
            pydirectinput.press('j')
        elif attack_type == 1:
            pydirectinput.keyDown('w')
            pydirectinput.press('j')
            pydirectinput.keyUp('w')
        elif attack_type == 2:
            pydirectinput.keyDown('s')
            pydirectinput.press('j')
            pydirectinput.keyUp('s')
        time.sleep(CHECK_TIME)

# Grab one raw screenshot of the full capture region.
def capture_frame(sct)->np.ndarray:
    """Return the current CAPTURE_WINDOW screenshot as a BGR uint8 array of shape (H, W, 3)."""
    raw = np.array(sct.grab(CAPTURE_WINDOW))  # BGRA uint8, shape (H, W, 4)
    return raw[..., :3]                       # drop the alpha channel -> BGR

# Crop a sub-region (knight HP / boss HP / battle view) out of the full frame.
def extract_subframe(frame,window)->np.ndarray:
    """Return frame[top:top+height, left:left+width] for the given window dict."""
    top, left = window["top"], window["left"]
    return frame[top:top + window["height"], left:left + window["width"]]

# Global reference image of the knight HP bar, loaded once at import time.
# NOTE(review): cv2.imread returns None when the file is missing — verify
# "knight_ref_hp.png" sits in the working directory before running.
KNIGHT_REF_IMG = cv2.imread("knight_ref_hp.png")

# Knight HP-bar crop -> number of HP units lost (int).
def detect_knight_hp_change(cmp_img,threshold=KNIGHT_HP_THRESHOLD)->int:
    """Count contiguous runs (length >= threshold) on row 0 where cmp_img still
    matches KNIGHT_REF_IMG pixel-for-pixel; presumably one run per HP mask —
    confirm against the capture layout."""
    row_match = np.all(cmp_img == KNIGHT_REF_IMG, axis=2)[0]
    edges = np.diff(row_match.astype(np.int8), prepend=0, append=0)
    run_starts = np.flatnonzero(edges == 1)
    run_ends = np.flatnonzero(edges == -1)
    return np.sum(run_ends - run_starts >= threshold)

# Boss HP-bar crop -> remaining HP as a fraction (float).
def detect_boss_hp_percentage(hp_bar_img)->float:
    """Estimate the boss HP fraction from row 0 of the HP-bar crop.

    Returns 0.0 when the bar is absent (top-left pixel is not the bar color),
    otherwise the column of the first strong horizontal color change divided
    by the bar width, or 1.0 if no such change is found.
    """
    # Bar-present check: the leftmost pixel must equal the bar's BGR color [5, 0, 96].
    if not np.array_equal(hp_bar_img[0, 0], [5, 0, 96]):
        return 0.0
    w = BOSS_HP_WINDOW["width"]
    # Per-column color difference along row 0 (int16 avoids uint8 wraparound).
    diff = np.diff(hp_bar_img[0, :, :].astype(np.int16), axis=0)
    candidates = np.where(np.abs(diff).max(axis=1) > 1)[0]
    if len(candidates) > 0:
        # First abrupt color change — presumably the filled/empty boundary; verify in-game.
        return candidates[0] / w
    else:
        return 1.0

# Trajectory buffer: keeps the most recent steps in memory so rewards can be
# backfilled before each step is flushed to the HDF5 file.
# (Known quirk: the H5 file keeps growing because deleted groups leave
# reserved space behind — HDF5 does not reclaim freed space in-place.)
class TrajectoryBuffer:
    """Fixed-length FIFO of (count, frame, reward, knight_hp, boss_hp, action).

    Steps are pushed with reward 0.0; subsequent events (damage dealt/taken,
    survival bonus) are added onto the newest reward via backfill_previous().
    When full, the oldest step is popped and written to an HDF5 group.
    """

    def __init__(self, max_len: int):
        self.max_len = max_len
        self.counts     = deque(maxlen=max_len)
        self.frames     = deque(maxlen=max_len)
        self.rewards    = deque(maxlen=max_len)
        self.knight_hp  = deque(maxlen=max_len)
        self.boss_hp    = deque(maxlen=max_len)
        self.actions    = deque(maxlen=max_len)

    def push(self, count: int, frame: np.ndarray, knight: int, boss: float, action: list):
        """Append a new step with an initial reward of 0.0."""
        self.counts.append(count)
        self.frames.append(frame.copy())  # copy: the caller may reuse the array
        self.rewards.append(0.0)
        self.knight_hp.append(knight)
        self.boss_hp.append(boss)
        self.actions.append(np.array(action))  # list -> np.ndarray

    def backfill_previous(self, new_reward: float):
        """Add new_reward onto the most recent step's reward.

        No-op on an empty buffer — fixes an IndexError when a reward event
        fires before the first step has been pushed.
        """
        if not self.rewards:
            return
        # Multi-step variant would spread over min(len(self.rewards), REWARD_RANGE)
        # trailing steps; currently only the latest step is rewarded.
        self.rewards[-1] += new_reward

    def pop_oldest_and_save(self, h5_group):
        """Pop the oldest step and write it as a new "step_<count>" group under h5_group.

        Returns False when the buffer is empty, True after a successful write.
        """
        if len(self.frames) == 0:
            return False

        # popleft() removes the leftmost (oldest) entry from each deque.
        count   = self.counts.popleft()
        frame   = self.frames.popleft()
        reward  = self.rewards.popleft()
        knight  = self.knight_hp.popleft()
        boss    = self.boss_hp.popleft()
        action  = self.actions.popleft()

        # Write to HDF5 — dataset dtypes follow the pushed values.
        step_grp = h5_group.create_group(f"step_{count}")
        step_grp.create_dataset("count",     data=count)   # int
        step_grp.create_dataset("frame",     data=frame)   # uint8 ndarray (WRITE_IN_FPS, H, W)
        step_grp.create_dataset("reward",    data=reward)  # float
        step_grp.create_dataset("knight_hp", data=knight)  # int
        step_grp.create_dataset("boss_hp",   data=boss)    # float
        step_grp.create_dataset("action",    data=action)  # int ndarray [ad, space, attack]

        return True

    def is_full(self):
        """True when the buffer holds exactly max_len steps."""
        return len(self.counts) == self.max_len

    def flush_all(self, h5_group):
        """Write every remaining step to h5_group (called at episode end)."""
        while len(self.counts) > 0:
            self.pop_oldest_and_save(h5_group)

# Release every key the action threads may be holding down.
def release_all_keys():
    """Send keyUp for each key in ATOMIC_ACTIONS so nothing stays pressed after an episode."""
    for key in ATOMIC_ACTIONS:
        pydirectinput.keyUp(key)

# TODO: the Q-network must extract useful features from the images and output Q-values.
#  Input: image state tensor (B, WRITE_IN_FPS, H, W); output: (B, 18) Q-values for the 18 discrete actions (0-17).
# Q-network: original design (kept for reference)
# class Qnet(nn.Module):
#     def __init__(self, action_dim: int, write_in_fps: int = WRITE_IN_FPS):
#         super().__init__()
#         self.conv = nn.Sequential(
#             nn.Conv2d(write_in_fps, 32, kernel_size=8, stride=4),
#             nn.ReLU(),
#             nn.Conv2d(32, 64, kernel_size=4, stride=2),
#             nn.ReLU(),
#             nn.Conv2d(64, 64, kernel_size=3, stride=1),
#             nn.ReLU(),
#             nn.Flatten()
#         )
#         # 推导输出尺寸
#         with torch.no_grad():
#             dummy = torch.zeros(1, write_in_fps, 200, 512)
#             conv_out = self.conv(dummy)
#             fc_in = conv_out.shape[1]
#         self.fc = nn.Sequential(
#             nn.Linear(fc_in, 1024),
#             nn.ReLU(),
#             nn.Linear(1024, action_dim)
#         )
#
#     def forward(self, x: torch.Tensor) -> torch.Tensor:
#         # 输入的x: (B, WRITE_IN_FPS, 200, 512),数据类型dtype=torch.float32
#         x = x / 255.0  # 归一化到[0,1]
#         conv_out = self.conv(x)
#         return self.fc(conv_out) # torch.Tensor (B,24)

# Q-network option 1: deeper conv stack + ResBlock + SE attention (kept for reference)
# class SEBlock(nn.Module):
#     """Squeeze-and-Excitation Block"""
#     def __init__(self, channel, reduction=4):
#         super().__init__()
#         self.avg_pool = nn.AdaptiveAvgPool2d(1)
#         hidden_dim = max(channel // reduction, 8)  # 防止通道太少时 reduction 太大
#         self.fc = nn.Sequential(
#             nn.Linear(channel, hidden_dim, bias=False),
#             nn.ReLU(),
#             nn.Linear(hidden_dim, channel, bias=False),
#             nn.Sigmoid()
#         )
#
#     def forward(self, x):
#         b, c, _, _ = x.size()
#         y = self.avg_pool(x).view(b, c)
#         y = self.fc(y).view(b, c, 1, 1)
#         return x * y.expand_as(x)
#
#
# class ResBlock(nn.Module):
#     """Pre-activation Residual Block without BN (for DQN stability)"""
#     def __init__(self, channels):
#         super().__init__()
#         self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
#         self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
#
#     def forward(self, x):
#         residual = x
#         out = F.relu(x)
#         out = self.conv1(out)
#         out = F.relu(out)
#         out = self.conv2(out)
#         return out + residual
#
#
# class Qnet(nn.Module):
#     def __init__(self, action_dim: int, write_in_fps: int = WRITE_IN_FPS):
#         super().__init__()
#         self.write_in_fps = write_in_fps
#
#         # 卷积主干：更深 + 残差 + 注意力
#         self.conv = nn.Sequential(
#             # Stage 1
#             nn.Conv2d(write_in_fps, 32, kernel_size=8, stride=4),
#             nn.ReLU(),
#             # Stage 2
#             nn.Conv2d(32, 64, kernel_size=4, stride=2),
#             nn.ReLU(),
#             # Stage 3
#             nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
#             nn.ReLU(),
#             SEBlock(128),
#             # Stage 4: Residual blocks for deeper feature extraction
#             ResBlock(128),
#             ResBlock(128),
#             # Flatten
#             nn.Flatten()
#         )
#
#         # 自动推导全连接层输入维度
#         with torch.no_grad():
#             dummy = torch.zeros(1, write_in_fps, 200, 512)
#             conv_out = self.conv(dummy)
#             fc_in = conv_out.shape[1]
#
#         self.fc = nn.Sequential(
#             nn.Linear(fc_in, 1024),
#             nn.ReLU(),
#             nn.Linear(1024, action_dim)
#         )
#
#     def forward(self, x: torch.Tensor) -> torch.Tensor:
#         # x: (B, WRITE_IN_FPS, 200, 512), dtype=torch.float32
#         x = x / 255.0  # 归一化到 [0, 1]
#         conv_out = self.conv(x)
#         q_values = self.fc(conv_out)
#         return q_values

# Q-network option 2: ready-to-use EfficientNet-B0 backbone.
class Qnet(nn.Module):
    """Map stacked frames (B, WRITE_IN_FPS, H, W) to Q-values (B, action_dim)."""

    def __init__(self, action_dim: int, write_in_fps: int = WRITE_IN_FPS):
        super().__init__()
        # EfficientNet-B0 with random initialization (weights=None => trained from scratch).
        backbone = efficientnet_b0(weights=None)

        # Swap the stem convolution so it accepts write_in_fps stacked grayscale
        # frames instead of the default 3 RGB channels.
        stem = backbone.features[0][0]  # the first Conv2d
        backbone.features[0][0] = nn.Conv2d(
            in_channels=write_in_fps,
            out_channels=stem.out_channels,
            kernel_size=stem.kernel_size,
            stride=stem.stride,
            padding=stem.padding,
            bias=stem.bias is not None,
        )

        # Every feature-extraction stage up to (not including) global pooling.
        self.features = backbone.features  # output: (B, 1280, H', W')

        # Adaptive pooling keeps the head independent of the input resolution
        # (works fine with the 200x512 frames used here).
        self.avgpool = nn.AdaptiveAvgPool2d(1)

        # Fully connected head: 1280 backbone channels -> Q-values.
        self.fc = nn.Sequential(
            nn.Linear(1280, 1024),
            nn.ReLU(),
            nn.Linear(1024, action_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """x: (B, WRITE_IN_FPS, H, W) float32 in [0, 255]; returns (B, action_dim)."""
        feats = self.features(x / 255.0)                # normalize, then extract features
        pooled = torch.flatten(self.avgpool(feats), 1)  # (B, 1280)
        return self.fc(pooled)                          # (B, action_dim)

class DQN_Agent:
    """ DQN agent: outputs Q-values for a state to guide action selection; updates the Qnet from batches of (s, a, r, s'); loads and saves Qnet parameters. """
    def __init__(
        self,
        action_dim: int = 18, # 3×2×3 (ad × space × attack)
        device: torch.device = DEVICE, # e.g. cuda
        learning_rate: float = LR, # learning rate
        target_update: int = TARGET_UPDATE, # hard-update period for the target net (currently unused; soft updates below)
        tau: float = TAU, # target-network soft-update rate
        epsilon: float = 1.0, # epsilon-greedy exploration rate
        final_epsilon: float = 0.05,
        epsilon_decay: float =  (1 - 0.05) / ((MAX_EPISODES-MAX_TRAJECTORY) * 0.5),
        discount_factor: float = GAMMA, # reward discount factor
        dqn_type: str ='DoubleDQN', # 'VanillaDQN' or 'DoubleDQN'
    ):
        self.action_dim = action_dim
        self.device = device
        self.learning_rate = learning_rate      # learning rate
        self.target_update = target_update      # target-network update period
        self.tau = tau
        self.epsilon = epsilon                  # epsilon-greedy exploration rate
        self.final_epsilon = final_epsilon
        self.epsilon_decay = epsilon_decay
        self.discount_factor = discount_factor  # reward discount factor
        self.dqn_type = dqn_type

        self.q_net          = Qnet(action_dim).to(device)
        self.target_q_net   = Qnet(action_dim).to(device)
        # Looks redundant, but explicitly syncing the freshly built target net is good practice.
        self.target_q_net.load_state_dict(self.q_net.state_dict())

        # Only q_net's parameters go to the optimizer, so q_targets need not be
        # detached in the loss even though it comes from target_q_net: gradients
        # never propagate into target_q_net.
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=learning_rate)

        self.count = 0  # number of main-network updates performed

    def take_action(self, state: np.ndarray) -> List[int]:
        """Epsilon-greedy action selection; returns decoded [ad, space, attack]."""
        # state: numpy.ndarray (WRITE_IN_FPS, 200, 512) uint8
        if np.random.random() < self.epsilon:
            action_id = random.randint(0, self.action_dim - 1)
        else:
            # state[None, :] or np.expand_dims(state, 0): ndarray (1, WRITE_IN_FPS, H, W);
            # Qnet expects a batch dimension, hence the reshape.
            state = torch.tensor(np.expand_dims(state,0), dtype=torch.float32, device=self.device)
            # state: torch.Tensor of shape (1, WRITE_IN_FPS, H, W)

            # self.q_net(state): tensor of shape (1, 18)
            # .argmax(dim=1): tensor of shape (1,)
            # .item(): tensor contents -> int
            with torch.no_grad():
                q_values = self.q_net(state)
            action_id = q_values.argmax(dim=1).item()

        return decode_action(action_id)  # list of int [ad, space, attack]

    def update(self, batch_data: Tuple[np.ndarray, ...]):
        """One gradient step on a (states, actions, rewards, next_states, dones) batch."""
        states, actions, rewards, next_states, dones = batch_data

        # uint8 -> float32
        states      = torch.tensor(states, dtype=torch.float32,      device=self.device) # (B, WRITE_IN_FPS, H, W)
        actions     = torch.tensor(actions, dtype=torch.long,        device=self.device).unsqueeze(1) # (B, 1)
        rewards     = torch.tensor(rewards, dtype=torch.float32,     device=self.device).unsqueeze(1) # (B, 1)
        next_states = torch.tensor(next_states, dtype=torch.float32, device=self.device) # (B, WRITE_IN_FPS, H, W)
        dones       = torch.tensor(dones, dtype=torch.float32,       device=self.device).unsqueeze(1) # (B, 1)

        # (B, 18).gather(1, actions): index dim 1 with actions (B, 1),
        # yielding the Q-value of the taken action, shape (B, 1).
        q_values = self.q_net(states).gather(1, actions)

        # Even though the optimizer only holds q_net's parameters, compute q_targets
        # under torch.no_grad(): not for backprop correctness, but to ① avoid building
        # the computation graph and ② keep the intent explicit.
        with torch.no_grad():
            if self.dqn_type == 'DoubleDQN':
                max_action = self.q_net(next_states).max(1)[1].unsqueeze(1) # (B, 1)
                max_next_q_values = self.target_q_net(next_states).gather(1, max_action) # (B, 1)
            else:
                # (B, 18).max(1) returns a namedtuple (values, indices);
                # [0] takes values: (B,), then .unsqueeze(1) gives (B, 1).
                max_next_q_values = self.target_q_net(next_states).max(1)[0].unsqueeze(1) # (B, 1)
            q_targets = rewards + self.discount_factor * max_next_q_values * (1 - dones) # (B, 1)

        # Mean squared error between the two (B, 1) tensors.
        dqn_loss = F.mse_loss(q_values, q_targets, reduction='mean')

        self.optimizer.zero_grad()
        dqn_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.q_net.parameters(), max_norm=1.0) # gradient clipping
        self.optimizer.step()
        self.count += 1
        print(f"📉 Step {self.count}: DQN Loss = {dqn_loss.item():.4f}")

        # Soft update: nudge the target network after every main-network update.
        for target_param, param in zip(self.target_q_net.parameters(), self.q_net.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

        # if self.count % self.target_update == 0: # hard update every target_update main-net updates
        #     self.target_q_net.load_state_dict(self.q_net.state_dict())

    def decay_epsilon(self):
        """Reduce exploration rate after each episode (floored at final_epsilon)."""
        self.epsilon = max(self.final_epsilon, self.epsilon - self.epsilon_decay)

    def save_model(self, path: str):
        """Save the main network, optimizer state, and training hyperparameters."""
        torch.save({
            'q_net_state_dict': self.q_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'epsilon': self.epsilon,
            'count': self.count
        }, path)
        print(f"Model saved to {path}")

    def load_model(self, path: str):
        """Load the main network from a checkpoint (if present) and sync the target network."""
        if not os.path.exists(path):
            print(f"No saved model found at {path}, starting from scratch.")
            return False

        checkpoint = torch.load(path, map_location=self.device, weights_only=True)
        self.q_net.load_state_dict(checkpoint['q_net_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.epsilon = checkpoint.get('epsilon', self.epsilon)
        self.count = checkpoint.get('count', self.count)

        self.target_q_net.load_state_dict(self.q_net.state_dict())

        print(f"Model loaded from {path} and target network synchronized.")
        return True

def main(episode_id, save_path, agent):
    """Run one episode: capture frames, drive the agent, record the trajectory.

    Returns (complete_mark, total_reward); complete_mark is False when the HP
    reading failed and the episode should be re-sampled.
    """
    complete_mark = True # assume success; flipped on sampling errors so the episode is redone
    current_action.update({k: 0 for k in current_action.keys()}) # reset the action dict
    # Action worker threads
    threads = [
        threading.Thread(target=ad_thread, daemon=True),
        threading.Thread(target=space_thread, daemon=True),
        threading.Thread(target=attack_thread, daemon=True)
    ]
    activate_window("Hollow Knight")
    time.sleep(1)
    # Scripted inputs that navigate into the boss fight.
    pydirectinput.press("w")
    time.sleep(3)
    pydirectinput.press("w")
    time.sleep(1)
    pydirectinput.press("space")
    time.sleep(3)

    with mss.mss() as sct, h5py.File(save_path, 'a') as f:
        group_name = f"episode_{episode_id}"
        if group_name in f:
            del f[group_name]
        episode_grp = f.create_group(group_name)

        buffer = TrajectoryBuffer(max_len=REWARD_RANGE+1)
        count            = 0             # unique screenshot counter
        prev_knight      = 10            # knight HP last frame
        prev_knight_loss = 0             # knight damage reading last frame
        prev_boss        = 0             # boss HP last frame
        frame_stack      = deque(maxlen=WRITE_IN_FPS)

        shutdown_event.clear()  # cleared: worker threads keep running
        for t in threads:
            t.start() # start the action threads

        start_time = time.time()
        while True:
            # Pace the loop to FPS using absolute frame deadlines.
            next_frame_time = start_time + (count + 1) / FPS
            current_time = time.time()
            if current_time < next_frame_time:
                time.sleep(next_frame_time - current_time)

            frame = capture_frame(sct)
            count += 1 # screenshots taken so far

            # Split the frame into three regions:
            # ① battle view
            battle_frame = extract_subframe(frame,BATTLE_WINDOW) # crop
            gray_battle_frame = cv2.cvtColor(battle_frame, cv2.COLOR_BGR2GRAY) # grayscale
            h = BATTLE_WINDOW["height"]
            w = BATTLE_WINDOW["width"]
            # todo: decide the network input size/shape and preprocessing (w // 5, h // 5)
            resized_gray = cv2.resize(gray_battle_frame, (w // 5, h // 5), interpolation=cv2.INTER_AREA) # downscale
            frame_stack.append(resized_gray) # deque of ndarray
            # ② knight HP
            knight_hp_bar = extract_subframe(frame, KNIGHT_HP_WINDOW)
            current_knight_loss = detect_knight_hp_change(knight_hp_bar, KNIGHT_HP_THRESHOLD)
            current_knight = prev_knight - (current_knight_loss if current_knight_loss != prev_knight_loss else 0)
            if current_knight_loss != prev_knight_loss or prev_knight_loss == 0:
                prev_knight_loss = current_knight_loss
            # ③ boss HP
            boss_hp_bar = extract_subframe(frame,BOSS_HP_WINDOW)
            current_boss = detect_boss_hp_percentage(boss_hp_bar)

            # todo: reward design — dodging first, attacking second, finishing quickly last
            is_done = False
            # ① knight analysis: its HP gives the most reliable information about the round
            if current_knight == prev_knight: # no damage taken
                if buffer.counts:
                    buffer.backfill_previous(0.01) # survival reward
            elif current_knight == 0:  # knight died; sampling ends, but don't break yet — wait for the boss HP settlement
                buffer.backfill_previous(-1.0)
                print(f"step:{count:3d}:Knight-{prev_knight - current_knight}HP！dead！reward:-1")
                is_done = True
            elif current_knight < prev_knight: # taking damage is punished harder than dealing it: learn to dodge first, attack second
                buffer.backfill_previous(-1.0)
                print(f"step:{count:3d}:Knight-{prev_knight-current_knight}HP！reward:-1")
            # Healing is not handled: under the current assumption HP is monotonically decreasing
            else:
                is_done = True
                print("小骑士血量出错！")

            # ② boss HP analysis: its readings are noisier and more ambiguous
            if current_boss == prev_boss: # not yet hit at the start, or no hit this frame
                pass
            elif current_boss == 0 and prev_boss != 0: # single boss, single phase: this is the kill moment
                if prev_boss > 0.1:
                    complete_mark = False  # flag the episode for re-sampling
                    print("血量采集出错，重新采集一次")
                else:
                    buffer.backfill_previous(0.1)
                    print(f"step:{count:3d}:BOSS-{prev_boss:.2f}HP BOSS dead！ reward:1")
                is_done = True
            elif (current_boss < prev_boss) or (prev_boss == 0 and current_boss != 0):
                buffer.backfill_previous(0.1)
                print(f"step:{count:3d}:BOSS-{(1 if prev_boss == 0 else prev_boss)-current_boss:.2f}HP reward:1")
            # Possible later extension: boss HP recovering (partially or to full); fine for a single boss/phase for now
            else:
                is_done = True
                print("BOSS血量出错！")

            if (count % WRITE_IN_FPS) == 0:
                frames_array = np.stack(frame_stack) # deque of ndarray (H, W) -> ndarray (WRITE_IN_FPS, H, W)
                action = agent.take_action(frames_array) # list of int: [ad, space, attack]
                with action_lock:
                    current_action['ad']     = action[0]
                    current_action['space']  = action[1]
                    current_action['attack'] = action[2]
                buffer.push(int(count/WRITE_IN_FPS-1), frames_array, current_knight, current_boss, action)

            # Rewards updated — if the buffer is full, pop the oldest step to disk
            if buffer.is_full():
                buffer.pop_oldest_and_save(episode_grp)

            if is_done == True or (time.time() - start_time) >= DURATION:
                print(f"{time.time() - start_time:.1f}s : ⏹️ 采样结束")
                break

            prev_knight = current_knight
            prev_boss   = current_boss

        total_time = time.time() - start_time
        actual_fps = count / total_time  # frames / total time
        print(f"实际平均采样率: {actual_fps:.2f} FPS")

        # Tell the action threads to exit
        shutdown_event.set()  # set: workers stop after their current cycle
        for t in threads:
            t.join()
        release_all_keys()

        total_reward = 0.0
        if not complete_mark:
            del f[group_name]  # drop the whole episode
        else:
            buffer.flush_all(episode_grp) # write the remaining steps to the file

            step_keys = sorted(
                [k for k in episode_grp.keys() if k.startswith('step_')],
                key=lambda x: int(x.split('_')[-1])  # sort by step number
            )
            for step_key in step_keys:
                if 'reward' in episode_grp[step_key]:
                    r = episode_grp[step_key]['reward'][()]
                    total_reward += float(r)
            print(f"✅ Episode {episode_id} finished. Total reward: {total_reward:.2f}")

    activate_window("HollowKnight_RL – collect_train_without_dash.py")

    return complete_mark, total_reward

# Return every episode_id stored in the file.
def get_all_episode_ids(save_path=TRAJECTORY_SAVE_PATH) -> list:
    """Return the sorted episode ids ("episode_<id>" groups) in the HDF5 file.

    Returns [] when the file is missing or unreadable.
    """
    try:
        with h5py.File(save_path, 'r') as f:
            ids = []
            for key in f.keys():
                if key.startswith("episode_"):
                    try:
                        ids.append(int(key.split("_")[1]))
                    except ValueError:
                        continue  # malformed group name: skip it
            return sorted(ids)
    # FileNotFoundError is a subclass of OSError (and h5py raises OSError for
    # unreadable files), so OSError alone covers both previous cases.
    except OSError:
        return []

# If the file already holds max_episodes trajectories, delete the oldest ones.
def cleanup_old_episodes(save_path=TRAJECTORY_SAVE_PATH, max_episodes=MAX_TRAJECTORY):
    """Keep at most max_episodes episodes in the file, deleting the lowest ids.

    One extra slot is always freed so a new episode can be written afterwards.
    """
    ids = get_all_episode_ids(save_path)
    if len(ids) < max_episodes:
        return
    surplus = len(ids) - max_episodes + 1  # +1 frees a slot for the incoming episode
    with h5py.File(save_path, 'a') as f:
        for old_id in ids[:surplus]:
            group_name = f"episode_{old_id}"
            if group_name in f:
                del f[group_name]

# Extract trajectory data for a list of episode ids.
def load_episodes_by_id(episode_ids: List[int], h5_path: str=TRAJECTORY_SAVE_PATH) -> dict:
    """Load the listed episodes and build valid (s, a, r, s', done) transitions."""
    states, actions, rewards = [], [], []
    next_states, dones = [], []

    with h5py.File(h5_path, 'r') as f:
        for ep_id in episode_ids:
            ep_name = f"episode_{ep_id}"
            if ep_name not in f:
                print(f"⚠️ {ep_name}不存在！")
                continue

            # Step group names sorted numerically by their step index.
            steps = sorted(f[ep_name].keys(), key=lambda x: int(x.split('_')[-1]))

            ep_states, ep_actions, ep_rewards = [], [], []
            for step in steps:
                grp = f[ep_name][step]
                ep_states.append(grp['frame'][:])             # uint8 ndarray (T, H, W)
                ep_actions.append(grp['action'][:].tolist())  # list of int [ad, space, attack]
                ep_rewards.append(grp['reward'][()])          # float

            # Chain consecutive steps into transitions; the last one is terminal.
            final_idx = len(ep_states) - 2
            for i in range(len(ep_states) - 1):
                states.append(ep_states[i])
                actions.append(ep_actions[i])
                rewards.append(ep_rewards[i])
                next_states.append(ep_states[i + 1])
                dones.append(1.0 if i == final_idx else 0.0)

    return {
        'states': states,           # list of uint8 ndarray (T, H, W)
        'actions': actions,         # list of [ad, space, attack]
        'rewards': rewards,         # list of float
        'next_states': next_states, # list of uint8 ndarray (T, H, W)
        'dones': dones,             # list of 0.0 / 1.0
    }

def sample_batch(current_id: list, h5_path: str, batch_size: int):
    """Sample up to batch_size transitions (without replacement) from the given episodes.

    Returns (states, actions, rewards, next_states, dones) as numpy arrays,
    or None when fewer than two transitions are available.
    """
    data = load_episodes_by_id(current_id, h5_path)  # dict of transition lists
    if data is None or len(data['states']) < 2:
        return None

    total = len(data['states'])
    picks = np.random.choice(total, size=min(batch_size, total), replace=False)

    batch_states      = np.stack([data['states'][i] for i in picks])                  # uint8 (B, T, H, W)
    batch_actions     = np.array([encode_action(*data['actions'][i]) for i in picks]) # int (B,)
    batch_rewards     = np.array([data['rewards'][i] for i in picks])                 # float (B,)
    batch_next_states = np.stack([data['next_states'][i] for i in picks])             # uint8 (B, T, H, W)
    batch_dones       = np.array([data['dones'][i] for i in picks])                   # (B,) of 0.0 / 1.0

    return batch_states, batch_actions, batch_rewards, batch_next_states, batch_dones

if __name__ == '__main__':
    # Build the agent and restore the latest checkpoint if one exists.
    agent = DQN_Agent(
        action_dim=18,
        device=DEVICE,
        learning_rate=LR,
        target_update=5,
        tau=TAU,
        epsilon=1.0,
        final_epsilon=0.05,
        epsilon_decay=(1 - 0.05) / ((MAX_EPISODES-MAX_TRAJECTORY) * 0.5),
        discount_factor=GAMMA,
        dqn_type='DoubleDQN',
    )
    agent.load_model(MODEL_PATH)

    # Collect MAX_EPISODES trajectories
    for _ in range(MAX_EPISODES):
        # Drop the oldest trajectory if the file is full
        cleanup_old_episodes(TRAJECTORY_SAVE_PATH, MAX_TRAJECTORY)
        # Compute the next episode id
        current_ids = get_all_episode_ids(TRAJECTORY_SAVE_PATH)
        next_id = (current_ids[-1] + 1) if current_ids else 0

        # Sample one trajectory (retry until it completes cleanly)
        print(f"🎮 开始第 {next_id + 1} 局采样 (episode_{next_id})...")
        complete = False
        total_reward = 0.0
        while not complete:
            complete, total_reward = main(episode_id=next_id, save_path=TRAJECTORY_SAVE_PATH, agent=agent)
            time.sleep(1) # pause between episodes
        with open(REWARD_LOG_PATH, 'a') as f:
            f.write(f"{total_reward}\n")

        # Train once the replay file holds a full set of trajectories
        current_ids = get_all_episode_ids(TRAJECTORY_SAVE_PATH)
        if len(current_ids) == MAX_TRAJECTORY:
            print("🧠 开始训练...")
            for epoch in range(EPOCH):
                batch = sample_batch(current_ids, TRAJECTORY_SAVE_PATH, batch_size=BATCH_SIZE)
                if batch is None:
                    break
                for _ in range(EPOCHS_PER_BATCH):
                    agent.update(batch)
                    print("update...")
            agent.save_model(MODEL_PATH)
            agent.decay_epsilon()

        # Load reward history and plot it
        rewards_history = []
        if os.path.exists(REWARD_LOG_PATH):
            with open(REWARD_LOG_PATH, 'r') as f:
                rewards_history = [float(line.strip()) for line in f.readlines()]
            print(f"📈 已加载 {len(rewards_history)} 轮历史奖励")
        plt.figure(figsize=(10, 5))
        plt.plot(rewards_history, marker='o')
        plt.title("Episode Rewards Over Time")
        plt.xlabel("Episode")
        plt.ylabel("Total Reward")
        plt.grid(True)
        plt.savefig("reward_curve.png")
        # NOTE(review): plt.show() blocks until the window is closed — confirm
        # that pausing between episodes here is intended.
        plt.show()
        time.sleep(4)  # pause to inspect the loss and reward changes
    print("🎉 训练完成！")
