"""
Conflict-aware PEVA 仿真框架 (Python)
- 2 vs 2 无人艇对抗，多资源点
- A 全知 B 的状态 (pos, vel, acc)，但意图用概率预测
- 动态重分配 + 资源冷却刷新
"""

import numpy as np
import math
import itertools
import random
import copy

# ---------- math helpers ----------
EPS = 1e-9  # small epsilon guarding divisions/logs against zero

def softmax(x, beta=1.0):
    """Numerically stable softmax with inverse temperature *beta*.

    The maximum is subtracted before exponentiation to avoid overflow;
    larger ``beta`` makes the resulting distribution sharper.
    """
    arr = np.array(x, dtype=float)
    shifted = arr - np.max(arr)
    weights = np.exp(beta * shifted)
    # EPS keeps the division finite even for a degenerate all -inf input.
    return weights / (np.sum(weights) + EPS)

def kl_divergence(p, q):
    """Return KL(p || q) for two discrete distributions given as 1-D arrays."""
    # A tiny additive floor keeps the log and the division finite for zeros.
    pp = np.asarray(p, dtype=float) + 1e-12
    qq = np.asarray(q, dtype=float) + 1e-12
    return np.sum(pp * np.log(pp / qq))

# ---------- 环境 / 实体类 ----------
class Resource:
    """A capturable resource point with a capture radius and a cooldown/respawn cycle."""

    def __init__(self, rid, pos, value, capture_radius=2.0, cooldown=10.0):
        self.id = rid
        self.pos = np.array(pos, dtype=float)
        self.value = float(value)
        self.capture_radius = capture_radius
        self.cooldown = cooldown
        self.status = "free"          # "free", "captured", "cooldown"
        self.owner = None             # (group, boat_id) of the capturing boat
        self.cool_timer = 0.0
        # Arrival events collected during the current timestep (cleared on resolve).
        self._arrivals = []

    def mark_arrival(self, boat, crossing_time):
        """Record that *boat* crossed the capture boundary at absolute sim time *crossing_time*."""
        self._arrivals.append((boat, crossing_time))

    def resolve_arrivals(self, delta_t_conflict, delta_t_safe, sim_time):
        """Resolve this step's arrival events and pick a winner (if the resource is still free).

        Rules:
          - a sole arrival, or a lead over the runner-up larger than
            ``delta_t_safe``  -> the first boat wins outright;
          - otherwise capability decides, contested among every boat that
            arrived within ``delta_t_safe`` of the first (ties break randomly).

        Returns the winning Boat, or None.
        """
        if self.status != "free" or len(self._arrivals) == 0:
            self._arrivals = []
            return None

        # Sort by crossing time; the earliest arrival is the reference instant.
        self._arrivals.sort(key=lambda item: item[1])
        first_time = self._arrivals[0][1]
        conflict_group = [(b, t) for (b, t) in self._arrivals
                          if t - first_time <= delta_t_conflict + 1e-9]

        if len(conflict_group) == 1:
            if len(self._arrivals) == 1:
                winner = self._arrivals[0][0]
                self._arrivals = []
                return winner
            second_time = self._arrivals[1][1]
            if second_time - first_time > delta_t_safe:
                # Clear first-mover advantage: no contest.
                winner = self._arrivals[0][0]
                self._arrivals = []
                return winner
            # BUGFIX: a chaser inside the safe window used to be ignored because
            # only the (narrower) conflict window was contested, so the first
            # boat always won.  Widen the contested group to every arrival
            # within delta_t_safe of the first boat, as the rule intends.
            conflict_group = [(b, t) for (b, t) in self._arrivals
                              if t - first_time <= delta_t_safe + 1e-9]

        # Contested capture: highest capability wins, ties break uniformly at random.
        max_cap = max(b.capability for (b, _t) in conflict_group)
        candidates = [b for (b, _t) in conflict_group if abs(b.capability - max_cap) < 1e-9]
        winner = random.choice(candidates)
        self._arrivals = []
        return winner

    def set_captured(self, winner_boat, sim_time):
        """Mark the resource captured by *winner_boat*, start the cooldown, and return its value."""
        self.status = "captured"
        self.owner = (winner_boat.group, winner_boat.id)
        self.cool_timer = self.cooldown
        # The environment handles the later random respawn of position/value.
        return self.value

    def step_cooldown(self, dt):
        """Advance the cooldown timer; return True when the resource just became free again."""
        if self.status in ("captured", "cooldown"):
            self.cool_timer -= dt
            if self.cool_timer <= 0:
                self.status = "free"
                self.owner = None
                self.cool_timer = 0.0
                return True  # the environment may respawn it now
        return False

class Boat:
    """A point-mass boat with capped acceleration and speed chasing a target point."""

    def __init__(self, bid, group, pos, vel=(0.0,0.0), acc=(0.0,0.0),
                 capability=1.0, max_speed=3.0, max_acc=1.0):
        self.id = bid
        self.group = group  # "A" or "B"
        self.pos = np.array(pos, dtype=float)
        self.prev_pos = np.array(pos, dtype=float)  # last position, for segment-crossing tests
        self.vel = np.array(vel, dtype=float)
        self.acc = np.array(acc, dtype=float)
        self.capability = float(capability)
        self.max_speed = float(max_speed)
        self.max_acc = float(max_acc)
        # Current assignment (resource id and its coordinates).
        self.target_id = None
        self.target_pos = None
        self.state = "idle"  # "idle" or "moving"
        # Bookkeeping.
        self.total_reward = 0.0
        self.history = []  # list of (t, x, y) samples

    def set_target(self, resource):
        """Aim at *resource*; passing None makes the boat idle."""
        if resource is None:
            self.target_id = None
            self.target_pos = None
            self.state = "idle"
            return
        self.target_id = resource.id
        self.target_pos = np.array(resource.pos, dtype=float)
        self.state = "moving"

    def clear_target(self):
        """Drop the current assignment and go idle."""
        self.set_target(None)

    def step(self, dt):
        """Advance one timestep of accelerate-toward-target kinematics."""
        # Remember the previous position so crossing times can be interpolated.
        self.prev_pos = self.pos.copy()
        if self.target_pos is None:
            self.vel *= 0.0  # hold position while idle
            return
        offset = self.target_pos - self.pos
        remaining = np.linalg.norm(offset)
        if remaining < 1e-6:
            # Close enough: snap onto the target and stop.
            self.pos = self.target_pos.copy()
            self.vel *= 0.0
            self.state = "idle"
            return
        # Desired velocity: full speed straight at the target.
        wanted_vel = (offset / remaining) * self.max_speed
        # Acceleration needed to reach it within dt, capped at max_acc.
        accel = (wanted_vel - self.vel) / max(dt, 1e-9)
        accel_mag = np.linalg.norm(accel)
        if accel_mag > self.max_acc:
            accel = accel / accel_mag * self.max_acc
        # Integrate velocity, enforce the speed limit, then integrate position.
        self.vel += accel * dt
        speed = np.linalg.norm(self.vel)
        if speed > self.max_speed:
            self.vel = self.vel / speed * self.max_speed
        self.pos += self.vel * dt

    def distance_to(self, pos):
        """Euclidean distance from this boat to *pos*."""
        return np.linalg.norm(self.pos - np.array(pos, dtype=float))

# ---------- PEVA Planner ----------
class CPEVAPlanner:
    """Conflict-aware expected-value assignment (C-PEVA) planner for team A.

    Per call to :meth:`step_and_assign_if_needed`:
      1. predict B's intent as a probability matrix P (softmax over travel cost),
      2. score every (A boat, resource) pair with an expected value folding in a
         piecewise win-probability model around the conflict/safe time windows,
      3. reassign A's targets when P shifted by more than ``KL_threshold``.
    """

    def __init__(self, boats_A, boats_B, resources,
                 delta_t_conflict=2.0, conflict_safe_ratio=1.5,
                 path_correction=1.0, beta_softmax=1.5,
                 c_move=0.0, KL_threshold=0.15,
                 min_win_threshold=0.25, no_win_penalty=50.0):
        """
        boats_A, boats_B: lists of Boat objects
        resources: list of Resource objects
        delta_t_conflict: conflict window (s)
        conflict_safe_ratio: delta_t_safe = conflict_safe_ratio * delta_t_conflict
        path_correction: factor inflating Euclidean distance to approximate path length
        beta_softmax: determinism (inverse temperature) of the intent softmax
        c_move: travel-time cost coefficient (units must match resource value)
        KL_threshold: change in P that triggers reassignment
        min_win_threshold: below this predicted win rate a target counts as "unwinnable"
        no_win_penalty: EV penalty applied to "unwinnable" targets
        """
        self.A = boats_A
        self.B = boats_B
        self.R = resources
        self.delta_t_conflict = delta_t_conflict
        self.delta_t_safe = conflict_safe_ratio * delta_t_conflict
        self.path_correction = path_correction
        self.beta = beta_softmax
        self.c_move = c_move
        self.KL_threshold = KL_threshold
        self.min_win_threshold = min_win_threshold
        self.no_win_penalty = no_win_penalty

        # Previous intent matrix, kept to detect when reassignment should trigger.
        self.prev_P = None

    def estimate_arrival_time(self, boat: "Boat", target_pos):
        """Kinematic ETA to *target_pos* given current speed, max acceleration and max speed."""
        dist = np.linalg.norm(np.array(target_pos, dtype=float) - boat.pos) * self.path_correction
        v0 = np.linalg.norm(boat.vel)
        vmax = boat.max_speed
        amax = boat.max_acc
        if dist < 1e-6:
            return 0.0
        # Already (almost) at top speed: treat as uniform motion.
        if v0 >= vmax * 0.999:
            return dist / vmax
        # Negligible acceleration: degrade to uniform motion (avoids division by zero).
        if amax < 1e-6:
            v_eff = max(v0, 1e-3)
            return dist / v_eff
        # Time and distance needed to accelerate up to vmax.
        t_to_vmax = max(0.0, (vmax - v0) / amax)
        s_acc = v0 * t_to_vmax + 0.5 * amax * t_to_vmax ** 2
        if s_acc >= dist:
            # Target reached while still accelerating: solve 0.5*a*t^2 + v0*t - dist = 0.
            a = 0.5 * amax
            b = v0
            c = -dist
            disc = b * b - 4 * a * c
            if disc < 0:
                return dist / max(v0, 1e-3)
            t = (-b + math.sqrt(disc)) / (2 * a)
            if t < 0:
                t = (-b - math.sqrt(disc)) / (2 * a)
            return max(0.0, t)
        else:
            # Accelerate to vmax, then cruise the remaining distance.
            s_rem = dist - s_acc
            return t_to_vmax + s_rem / vmax

    def predict_B_intent_prob(self):
        """
        Return the intent matrix P of shape (num_B, num_R).

        Model: softmax(-beta * cost) with cost = ETA / value, so nearer and
        more valuable resources attract higher probability.
        """
        num_B = len(self.B)
        num_R = len(self.R)
        P = np.zeros((num_B, num_R))
        for i, b in enumerate(self.B):
            costs = []
            for r in self.R:
                est_t = self.estimate_arrival_time(b, r.pos)
                # Value-weighted travel cost: higher value is more attractive.
                cost = est_t / (r.value + 1e-6)
                costs.append(-cost)  # softmax favors larger values, hence -cost
            P[i, :] = softmax(np.array(costs), beta=self.beta)
        return P

    def compute_bar_times_and_caps(self, P):
        """Return the P-weighted expected B arrival time and capability per resource.

        bar_tB[k] = sum_j P[j, k] * ETA(B_j -> resource_k), and likewise for capability.
        """
        num_R = len(self.R)
        bar_tB = np.zeros(num_R)
        bar_CB = np.zeros(num_R)
        for k, r in enumerate(self.R):
            t_list = np.array([self.estimate_arrival_time(b, r.pos) for b in self.B])
            cap_list = np.array([b.capability for b in self.B])
            # P[:, k] is each B boat's probability of heading for resource k.
            weights = P[:, k]
            bar_tB[k] = np.dot(weights, t_list)
            bar_CB[k] = np.dot(weights, cap_list)
        return bar_tB, bar_CB

    def compute_EV_matrix(self, P):
        """
        Compute the EV matrix (num_A x num_R):

            EV_ik = v_k * Pr_win_ik - c_move * tA_ik

        Pr_win is a piecewise function of delta = bar_tB[k] - tA_ik:
          delta >  delta_t_safe       -> 1.0 (A safely first)
          delta < -delta_t_safe       -> 0.0 (B safely first)
          |delta| <= delta_t_conflict -> capability ratio vs expected B strength
          otherwise                   -> linear blend between the two regimes

        Returns (EV, Pr_win, bar_tB, bar_CB).
        """
        num_A = len(self.A)
        num_R = len(self.R)
        EV = np.zeros((num_A, num_R))
        Pr_win = np.zeros((num_A, num_R))
        bar_tB, bar_CB = self.compute_bar_times_and_caps(P)

        for i, a in enumerate(self.A):
            for k, r in enumerate(self.R):
                tA = self.estimate_arrival_time(a, r.pos)
                delta = bar_tB[k] - tA  # positive -> A is expected to arrive first
                if delta > self.delta_t_safe:
                    pr = 1.0
                elif delta < -self.delta_t_safe:
                    pr = 0.0
                elif abs(delta) <= self.delta_t_conflict:
                    # Inside the conflict window the capability ratio decides
                    # (A's capability vs the expected opposing capability bar_CB).
                    denom = (a.capability + bar_CB[k]) if (a.capability + bar_CB[k]) > EPS else EPS
                    pr = a.capability / denom
                else:
                    # Transition zone (delta_t_conflict, delta_t_safe]: interpolate
                    # linearly between the capability value and the hard limit.
                    if delta > 0:
                        pr_conflict = a.capability / max(a.capability + bar_CB[k], EPS)
                        # from pr_conflict at delta_conflict up to 1.0 at delta_safe
                        t = (delta - self.delta_t_conflict) / (self.delta_t_safe - self.delta_t_conflict + EPS)
                        pr = pr_conflict + t * (1.0 - pr_conflict)
                    else:
                        pr_conflict = a.capability / max(a.capability + bar_CB[k], EPS)
                        # from pr_conflict at -delta_conflict down to 0.0 at -delta_safe
                        t = (abs(delta) - self.delta_t_conflict) / (self.delta_t_safe - self.delta_t_conflict + EPS)
                        pr = pr_conflict - t * (pr_conflict - 0.0)
                Pr_win[i, k] = float(np.clip(pr, 0.0, 1.0))
                ev = r.value * Pr_win[i, k] - self.c_move * tA
                # Penalize near-hopeless targets so the assignment prefers a
                # slightly worse but more reliable alternative.
                if Pr_win[i, k] < self.min_win_threshold:
                    ev -= self.no_win_penalty
                EV[i, k] = ev
        return EV, Pr_win, bar_tB, bar_CB

    def assign_tasks(self, EV):
        """
        Centralized, collision-free assignment maximizing total EV via
        exhaustive enumeration (fine for small fleets / <~10 resources):
          - num_R >= num_A: every A boat gets a distinct resource;
          - num_R <  num_A: every resource goes to a distinct A boat and the
            remaining boats stay idle.  (BUGFIX: previously only the first
            num_R boats were ever considered for an assignment, so a later
            boat with higher EV could never receive a resource.)

        Returns a list of length num_A holding a resource index or None per boat.
        """
        num_A = len(self.A)
        num_R = len(self.R)
        best_assign = None
        best_val = -1e12

        if num_R >= num_A:
            for perm in itertools.permutations(range(num_R), num_A):
                val = sum(EV[i, perm[i]] for i in range(num_A))
                if val > best_val:
                    best_val = val
                    best_assign = list(perm)
        else:
            # Fewer resources than boats: choose WHICH boats receive the
            # resources; the rest are left unassigned (idle).
            for boat_perm in itertools.permutations(range(num_A), num_R):
                val = sum(EV[boat_perm[k], k] for k in range(num_R))
                if val > best_val:
                    best_val = val
                    assign = [None] * num_A
                    for k, i in enumerate(boat_perm):
                        assign[i] = k
                    best_assign = assign
        # Fallback (should not happen): everyone idle.
        if best_assign is None:
            best_assign = [None] * num_A
        return best_assign

    def compute_P_KL_change(self, new_P):
        """
        Return the maximum per-boat KL divergence between prev_P and new_P.

        Returns +inf when prev_P is None so the very first call always assigns.
        """
        if self.prev_P is None:
            return float('inf')
        num_B = new_P.shape[0]
        kls = []
        for i in range(num_B):
            kls.append(kl_divergence(self.prev_P[i, :], new_P[i, :]))
        return max(kls)

    def step_and_assign_if_needed(self):
        """
        Main entry point: predict P, compute EV, and reassign A's targets when
        the intent distribution moved enough (or on the first call).

        Returns (assignments_or_None, EV, Pr_win, P, kl_change).
        """
        P = self.predict_B_intent_prob()
        kl_change = self.compute_P_KL_change(P)

        EV, Pr_win, bar_tB, bar_CB = self.compute_EV_matrix(P)
        need_assign = False
        if kl_change > self.KL_threshold:
            need_assign = True

        # Always assign at initialization.
        if self.prev_P is None:
            need_assign = True

        assignments = None
        if need_assign:
            assignments = self.assign_tasks(EV)
            # Remember the distribution that produced this assignment.
            self.prev_P = P.copy()
        return assignments, EV, Pr_win, P, kl_change

# ---------- B 的简单策略（仿真中使用） ----------
def B_greedy_policy(boat: Boat, resources, rule="value_dist"):
    """Heuristic target choice for a B boat.

    rule:
      - "nearest":        closest free resource
      - "highest_value":  most valuable free resource
      - anything else ("value_dist"): maximize value / distance

    Returns the chosen resource id, or None when nothing is free.
    """
    chosen = None
    chosen_score = -1e12
    for res in resources:
        if res.status != "free":
            continue
        gap = np.linalg.norm(np.array(res.pos) - boat.pos)
        if rule == "nearest":
            score = -gap
        elif rule == "highest_value":
            score = res.value
        else:
            score = res.value / (gap + EPS)
        if score > chosen_score:
            chosen_score = score
            chosen = res.id
    return chosen


def segment_circle_first_hit(p0, p1, center, radius):
    """First intersection of segment p0->p1 with the circle (center, radius).

    Returns the hit parameter u in [0, 1] (crossing time = t0 + u * dt), or
    None when the segment never touches the circle.  A start point already
    inside the circle counts as an immediate hit (u = 0), which also covers
    the zero-displacement case.
    """
    start = np.asarray(p0, dtype=float)
    end = np.asarray(p1, dtype=float)
    ctr = np.asarray(center, dtype=float)
    # Fallback: starting on/inside the circle is an immediate hit.
    if np.linalg.norm(start - ctr) <= radius + 1e-12:
        return 0.0
    seg = end - start
    rel = start - ctr
    # Quadratic |start + u*seg - ctr|^2 = radius^2 in u.
    qa = np.dot(seg, seg)
    qb = 2.0 * np.dot(rel, seg)
    qc = np.dot(rel, rel) - radius * radius
    disc = qb * qb - 4 * qa * qc
    if qa < 1e-12 or disc < 0:
        return None

    root = math.sqrt(disc)
    roots = ((-qb - root) / (2 * qa), (-qb + root) / (2 * qa))

    # Earliest root inside [0, 1] is the first crossing.
    valid = [u for u in roots if 0.0 <= u <= 1.0]
    return min(valid) if valid else None


# ====== 环境：补完 SimulationEnv ======
class SimulationEnv:
    """Simulation world: resources, both fleets, the A-side planner, and the step loop.

    Fleets (attach_fleets) and the planner (attach_planner) are injected after
    construction.  `step` advances one timestep; `run_episode` loops it and
    returns periodic reward samples.
    """

    def __init__(self, bounds=(0, 20, 0, 20), num_resources=4, seed=42,
                 value_range=(10, 30), capture_radius=2.0, cooldown=10.0):
        # Seed both RNGs so an episode is reproducible.
        random.seed(seed)
        np.random.seed(seed)
        self.bounds = bounds  # (xmin, xmax, ymin, ymax)
        self.num_resources = num_resources
        self.value_range = value_range
        self.capture_radius = capture_radius
        self.cooldown = cooldown
        self.time = 0.0
        # Anti-stagnation bookkeeping: if A scores nothing for longer than
        # stagnation_timeout seconds, step() forces a reassignment.
        self.last_A_capture_time = 0.0
        self.stagnation_timeout = 8.0
        # Create the resources, kept 1 unit inside the border.
        self.resources = []
        for i in range(num_resources):
            pos = (random.uniform(bounds[0] + 1, bounds[1] - 1),
                   random.uniform(bounds[2] + 1, bounds[3] - 1))
            val = random.uniform(*value_range)
            self.resources.append(Resource(i, pos, val,
                                           capture_radius=capture_radius,
                                           cooldown=cooldown))

        # Runtime objects (injected from outside).
        self.A = []        # list[Boat]
        self.B = []        # list[Boat]
        self.planner = None
        self.intent_log = []   # list of (t, P)  ; P shape (num_B, num_R)
        self.res_log = []      # list of (t, [(pos, value) for each resource])

    def attach_fleets(self, boats_A, boats_B):
        """Inject both fleets and start their position histories at the current time."""
        self.A = boats_A
        self.B = boats_B
        for boat in (self.A + self.B):
            boat.history = [(self.time, float(boat.pos[0]), float(boat.pos[1]))]

    def attach_planner(self, planner: 'CPEVAPlanner', force_simple_euclidean=True):
        """
        Bind the C-PEVA planner; optionally force its arrival-time estimate to
        plain "Euclidean distance / vmax" (the simplified path assumption).
        """
        self.planner = planner

        if force_simple_euclidean:
            def _simple_arrival_time(boat: Boat, target_pos):
                dist = np.linalg.norm(np.asarray(target_pos, float) - boat.pos) * planner.path_correction
                return dist / max(boat.max_speed, 1e-6)
            # Replace the planner's kinematic estimator with the simple model.
            planner.estimate_arrival_time = _simple_arrival_time

    # -- Resource refresh (random respawn once the cooldown expires) --
    def _respawn_resource(self, r: Resource):
        x = random.uniform(self.bounds[0] + 1, self.bounds[1] - 1)
        y = random.uniform(self.bounds[2] + 1, self.bounds[3] - 1)
        r.pos = np.array((x, y), dtype=float)
        r.value = random.uniform(*self.value_range)
        r.status = "free"
        r.owner = None
        r.cool_timer = 0.0

    # -- Advance one timestep --
    def step(self, dt=0.2, B_rule="value_dist", reassign_on_capture=True):
        """
        Advance dt seconds: intent prediction / assignment (A), greedy targeting (B),
        motion, arrival resolution, and resource refresh.
        """
        self.time += dt

        # 1) B side (simple heuristic): re-pick whenever a boat has no target
        #    or its current target is no longer free.
        for b in self.B:
            if (b.target_id is None) or (b.target_id >= len(self.resources)) or (self.resources[b.target_id].status != "free"):
                rid = B_greedy_policy(b, self.resources, rule=B_rule)
                b.set_target(self.resources[rid] if rid is not None else None)

        # 2) A side: C-PEVA probabilistic prediction + expected-value assignment
        #    (dynamic reassignment triggered by KL change).
        assignments, EV, Pr_win, P, kl = self.planner.step_and_assign_if_needed()
        self.intent_log.append((self.time, P.copy()))
        self.res_log.append((self.time, [(r.pos.copy(), float(r.value)) for r in self.resources]))
        if assignments is not None:
            # Apply the new assignment.
            for i, a in enumerate(self.A):
                rid = assignments[i]
                a.set_target(self.resources[rid] if rid is not None else None)

        # 3) Move every boat one step (Boat.step records prev_pos before moving).
        for boat in (self.A + self.B):
            boat.step(dt)
            boat.history.append((self.time, float(boat.pos[0]), float(boat.pos[1])))

        # --- Boundary clipping + stop on contact ---
        xmin, xmax, ymin, ymax = self.bounds
        for boat in (self.A + self.B):
            clipped_x = min(max(boat.pos[0], xmin), xmax)
            clipped_y = min(max(boat.pos[1], ymin), ymax)
            if (clipped_x != boat.pos[0]) or (clipped_y != boat.pos[1]):
                boat.pos[0], boat.pos[1] = clipped_x, clipped_y
                boat.vel *= 0.0          # brake so it stops drifting outward
                # Optional: B re-targets immediately (it tends to get stuck on the wall).
                if boat.group == "B":
                    rid = B_greedy_policy(boat, self.resources, rule=B_rule)
                    boat.set_target(self.resources[rid] if rid is not None else None)

        # 4) Arrival detection: did any movement segment enter a resource circle?
        for r in self.resources:
            if r.status != "free":
                continue
            for boat in (self.A + self.B):
                # Only test boats actually heading for this resource (optimization).
                if boat.target_id != r.id:
                    continue
                u = segment_circle_first_hit(boat.prev_pos, boat.pos, r.pos, r.capture_radius)
                if u is not None:
                    crossing_time = self.time - dt + u * dt
                    r.mark_arrival(boat, crossing_time)

        # 5) Resolve arrival conflicts, pay out rewards, start cooldowns.
        any_captured = False
        for r in self.resources:
            winner = r.resolve_arrivals(self.planner.delta_t_conflict,
                                        self.planner.delta_t_safe,
                                        self.time)
            if winner is not None:
                reward = r.set_captured(winner, self.time)
                winner.total_reward += reward
                any_captured = True
                winner.clear_target()
                if winner.group == "A":
                    self.last_A_capture_time = self.time

        # 6) Cooldown ticking and respawn.
        for r in self.resources:
            became_free = r.step_cooldown(dt)
            if became_free:
                self._respawn_resource(r)

        # Sync A's targets: follow a respawned resource's new coordinates, or
        # clear a target that is no longer free and wait for reassignment.
        for a in self.A:
            if a.target_id is None:
                continue
            r = self.resources[a.target_id]
            if r.status == "free":
                a.target_pos = r.pos.copy()  # resource moved; keep following it
            else:
                a.clear_target()              # target invalid; await the next assignment
        # If any A boat is idle (target cleared or never set), force one reassignment now.
        if any(a.target_id is None for a in self.A):
            self.planner.prev_P = None  # forces need_assign = True
            assignments, EV, Pr_win, P, kl = self.planner.step_and_assign_if_needed()
            if assignments is not None:
                for i, a in enumerate(self.A):
                    rid = assignments[i]
                    a.set_target(self.resources[rid] if rid is not None else None)


        # 7) After any capture, force a reassignment so A never stares at a dead target.
        if reassign_on_capture and any_captured:
            self.planner.prev_P = None  # key: forces need_assign = True
            assignments, EV, Pr_win, P, kl = self.planner.step_and_assign_if_needed()
            if assignments is not None:
                for i, a in enumerate(self.A):
                    rid = assignments[i]
                    a.set_target(self.resources[rid] if rid is not None else None)

        # If A has not scored for a long time, force a reassignment to break an idle loop.
        if (self.time - getattr(self, "last_A_capture_time", 0.0)) > getattr(self, "stagnation_timeout", 1e9):
            self.planner.prev_P = None
            assignments, EV, Pr_win, P, kl = self.planner.step_and_assign_if_needed()
            if assignments is not None:
                for i, a in enumerate(self.A):
                    rid = assignments[i]
                    a.set_target(self.resources[rid] if rid is not None else None)
            self.last_A_capture_time = self.time

        # Return a few monitoring quantities.
        return {
            "time": self.time,
            "A_rewards": [a.total_reward for a in self.A],
            "B_rewards": [b.total_reward for b in self.B],
            "KL_change": kl,
        }

    # -- Example run loop (optional) --
    def run_episode(self, T=60.0, dt=0.2, log_every=5.0, B_rule="nearest"):
        """Run until time T with step dt; return [(t, A_total, B_total)] sampled every log_every s."""
        next_log = 0.0
        logs = []
        while self.time < T:
            info = self.step(dt=dt, B_rule=B_rule)
            if self.time >= next_log:
                logs.append((self.time, sum(info["A_rewards"]), sum(info["B_rewards"])))
                next_log += log_every
        return logs


# ====== 可视化工具（Matplotlib，无第三方依赖） ======
def _gaussian_blob(grid_x, grid_y, cx, cy, sigma):
    return np.exp(-((grid_x - cx)**2 + (grid_y - cy)**2) / (2.0 * sigma**2))

def plot_episode(env, logs, out_prefix="sim"):
    """
    Produce three PNG figures:
      1) {out_prefix}_traj.png           — full A/B trajectories + final resource positions
      2) {out_prefix}_rewards.png        — cumulative reward curves (from run_episode logs)
      3) {out_prefix}_intent_heatmap.png — B-intent heatmap: per-step Gaussian blobs
         at the resource positions, weighted by P and accumulated over time
    """
    from matplotlib import pyplot as plt

    xmin, xmax, ymin, ymax = env.bounds
    # 1) trajectories
    fig1 = plt.figure(figsize=(6, 6))
    for b in env.A:
        arr = np.array(b.history)  # rows of (t, x, y)
        plt.plot(arr[:, 1], arr[:, 2], label=f"A{b.id}")
        plt.scatter(arr[-1, 1], arr[-1, 2], marker="o")
    for b in env.B:
        arr = np.array(b.history)
        plt.plot(arr[:, 1], arr[:, 2], linestyle="--", label=f"B{b.id}")
        plt.scatter(arr[-1, 1], arr[-1, 2], marker="x")
    # final resource positions
    xs = [r.pos[0] for r in env.resources]
    ys = [r.pos[1] for r in env.resources]
    plt.scatter(xs, ys, marker="s")
    plt.xlim(xmin, xmax); plt.ylim(ymin, ymax)
    plt.xlabel("x"); plt.ylabel("y"); plt.title("Boat trajectories")
    plt.legend(); plt.tight_layout()
    fig1.savefig(f"{out_prefix}_traj.png", dpi=150)

    # 2) cumulative reward curves
    if logs:
        ts = [t for (t, _, _) in logs]
        As = [a for (_, a, _) in logs]
        Bs = [b for (_, _, b) in logs]
        fig2 = plt.figure(figsize=(6, 4))
        plt.plot(ts, As, label="A_total")
        plt.plot(ts, Bs, label="B_total")
        plt.xlabel("t (s)"); plt.ylabel("total reward"); plt.title("Cumulative rewards")
        plt.legend(); plt.tight_layout()
        fig2.savefig(f"{out_prefix}_rewards.png", dpi=150)

    # 3) B-intent heatmap
    # Per step, the contention weight of resource k is w_k = sum_i P[i, k];
    # spread it as a Gaussian blob around the resource position and accumulate.
    nx = ny = 120
    gx = np.linspace(xmin, xmax, nx)
    gy = np.linspace(ymin, ymax, ny)
    grid_x, grid_y = np.meshgrid(gx, gy, indexing="xy")
    heat = np.zeros_like(grid_x)
    sigma = env.capture_radius * 2.5  # diffusion scale, tunable

    # intent_log and res_log are recorded in lockstep, one entry per step.
    for (tP, P), (tR, res_snapshot) in zip(env.intent_log, env.res_log):
        assert abs(tP - tR) < 1e-6
        w = P.sum(axis=0)  # total contention probability of each resource this step
        for (pos, _val), wk in zip(res_snapshot, w):
            heat += wk * _gaussian_blob(grid_x, grid_y, pos[0], pos[1], sigma)

    fig3 = plt.figure(figsize=(6, 6))
    # BUGFIX: with indexing="xy" the heat array is already laid out as
    # (row = y, col = x), which is exactly what imshow expects with
    # origin="lower"; the old `heat.T` mirrored the map across the diagonal
    # (silently, because nx == ny).
    plt.imshow(
        heat, origin="lower",
        extent=(xmin, xmax, ymin, ymax),
        aspect="equal", alpha=0.85
    )
    plt.xlabel("x"); plt.ylabel("y"); plt.title("B-intent heatmap (integrated)")
    plt.tight_layout()
    fig3.savefig(f"{out_prefix}_intent_heatmap.png", dpi=150)
# ====== 动图：逐帧演示对抗过程 ======
def animate_episode(make_env_fn, T=60.0, dt=0.2, out_path="demo_anim.gif", B_rule="value_dist"):
    """
    Render the engagement as a GIF, stepping a fresh env frame by frame.

    make_env_fn: zero-argument factory returning a brand-new env with fleets
                 and planner already attached (avoids reusing an env whose
                 time/history have already accumulated)
    T, dt: simulation horizon and timestep
    out_path: path of the GIF file to save
    """
    from matplotlib import pyplot as plt
    from matplotlib import animation
    env = make_env_fn()  # fresh env
    frames = int(T / dt)

    xmin, xmax, ymin, ymax = env.bounds
    fig, ax = plt.subplots(figsize=(6, 6))
    ax.set_xlim(xmin, xmax); ax.set_ylim(ymin, ymax)
    ax.set_title("t = 0.0 s")
    ax.set_xlabel("x"); ax.set_ylabel("y")

    # Boat markers (A: dots, B: crosses).
    a_handles = [ax.plot([], [], "o", label=f"A{i}", animated=True)[0] for i, _ in enumerate(env.A)]
    b_handles = [ax.plot([], [], "x", label=f"B{i}", animated=True)[0] for i, _ in enumerate(env.B)]
    # Resource markers (color encodes status).
    res_scatter = ax.scatter([r.pos[0] for r in env.resources],
                         [r.pos[1] for r in env.resources],
                         marker="s", animated=True)
    legend = ax.legend(loc="upper left")

    def _color_for(r):
        # free: green; captured: orange; cooldown: blue
        return ("tab:green" if r.status == "free"
                else "tab:orange" if r.status == "captured"
                else "tab:blue")

    def init():
        # Blank frame required by FuncAnimation's blitting setup.
        for h in a_handles + b_handles:
            h.set_data([], [])
        res_scatter.set_offsets(np.empty((0, 2)))
        return a_handles + b_handles + [res_scatter, legend]

    def update(frame_idx):
        info = env.step(dt=dt, B_rule=B_rule)  # advance one step (reassigns/refreshes internally)
        # boats
        for i, b in enumerate(env.A):
            a_handles[i].set_data([b.pos[0]], [b.pos[1]])
        for i, b in enumerate(env.B):
            b_handles[i].set_data([b.pos[0]], [b.pos[1]])
        # resources (position + color)
        xs = [r.pos[0] for r in env.resources]
        ys = [r.pos[1] for r in env.resources]
        res_scatter.set_offsets(np.c_[xs, ys])
        res_scatter.set_color([_color_for(r) for r in env.resources])

        ax.set_title(f"t = {env.time:.1f}s   A:{sum(info['A_rewards']):.1f}  B:{sum(info['B_rewards']):.1f}")
        return a_handles + b_handles + [res_scatter, legend]

    ani = animation.FuncAnimation(fig, update, init_func=init, frames=frames,
                                  interval=max(1, int(1000*dt/2)), blit=True)
    try:
        ani.save(out_path, writer="pillow", fps=int(1/max(dt, 1e-3)))
    finally:
        plt.close(fig)  # avoid the "More than 20 figures..." warning
    return out_path



# ====== Example: assemble and run ======
if __name__ == "__main__":
    # -- Static single episode --
    env = SimulationEnv(bounds=(0, 40, 0, 40), num_resources=6, seed=8)
    A = [
        Boat(bid=0, group="A", pos=(2.0, 2.0),  capability=1.1, max_speed=1, max_acc=0.4),
        Boat(bid=1, group="A", pos=(3.0, 17.0), capability=0.9, max_speed=1, max_acc=0.4),
    ]
    B = [
        Boat(bid=0, group="B", pos=(18.0, 18.0), capability=1.0, max_speed=1, max_acc=0.4),
        Boat(bid=1, group="B", pos=(17.0, 3.0),  capability=0.8, max_speed=1, max_acc=0.4),
    ]
    env.attach_fleets(A, B)
    planner = CPEVAPlanner(A, B, env.resources,
                           delta_t_conflict=2.0, conflict_safe_ratio=1.5,
                           path_correction=1.0, beta_softmax=1,
                           c_move=0.1, KL_threshold=0.1,
                           min_win_threshold=0.25, no_win_penalty=50.0)
    env.attach_planner(planner, force_simple_euclidean=True)

    logs = env.run_episode(T=20.0, dt=0.2, log_every=10.0, B_rule="nearest")
    print("t(s)\tA_total\tB_total")
    for t, a_sum, b_sum in logs:
        print(f"{t:5.1f}\t{a_sum:6.1f}\t{b_sum:6.1f}")
    # Draw the static figures only once, to keep the figure count down.
    plot_episode(env, logs, out_prefix="demo")

    # -- Generate the animation (with a brand-new env so time/history do not accumulate) --
    def _make_env():
        env2 = SimulationEnv(bounds=(0, 40, 0, 40), num_resources=6, seed=8)
        A2 = [
            Boat(bid=0, group="A", pos=(2.0, 2.0),  capability=1, max_speed=1, max_acc=0.4),
            Boat(bid=1, group="A", pos=(3.0, 17.0), capability=1, max_speed=1, max_acc=0.4),
        ]
        B2 = [
            Boat(bid=0, group="B", pos=(18.0, 18.0), capability=0.9, max_speed=1, max_acc=0.4),
            Boat(bid=1, group="B", pos=(17.0, 3.0),  capability=0.9, max_speed=1, max_acc=0.4),
        ]
        env2.attach_fleets(A2, B2)
        planner2 = CPEVAPlanner(A2, B2, env2.resources,
                                delta_t_conflict=2.0, conflict_safe_ratio=1.5,
                                path_correction=1.0, beta_softmax=1,
                                c_move=0.1, KL_threshold=0.1,
                                min_win_threshold=0.25, no_win_penalty=50.0)
        env2.attach_planner(planner2, force_simple_euclidean=True)
        return env2

    gif_path = animate_episode(_make_env, T=20.0, dt=0.2, out_path="demo_anim.gif", B_rule="nearest")
    print("Saved animation:", gif_path)
