# Copyright (c) Ruopeng Gao. All Rights Reserved.

import os
import torch
import einops
from scipy.optimize import linear_sum_assignment
from collections import deque
import math
from accelerate.state import PartialState

from structures.instances import Instances
from structures.ordered_set import OrderedSet
from utils.misc import distributed_device
from utils.box_ops import box_cxcywh_to_xywh
from models.misc import get_model


class RuntimeTracker:
    def __init__(
            self,
            model,
            # Sequence infos:
            sequence_hw: tuple,
            # Inference settings:
            use_sigmoid: bool = False,
            assignment_protocol: str = "hungarian",
            miss_tolerance: int = 30,
            det_thresh: float = 0.5,
            newborn_thresh: float = 0.5,
            id_thresh: float = 0.1,
            area_thresh: int = 0,
            only_detr: bool = False,
            dtype: torch.dtype = torch.float32,
            # LWG & buffer (optional):
            use_lwg: bool = False,
            lwg_model=None,
            lwg_main_th: float = 0.6,
            lwg_buf_th: float = 0.4,
            buffer_capacity: int = 10,
            buffer_promote_steps: int = 3,
            use_buffer_gate: bool = False,
            buffer_gate_model=None,
            buffer_gate_thresh: float = 0.5,
            buffer_history_window: int = 3,
            # LWG feature export (optional):
            export_lwg_features: bool = False,
            lwg_feature_dump_dir: str | None = None,
            # Buffer gate data export (optional):
            export_buffer_gate_features: bool = False,
            buffer_gate_dump_dir: str | None = None,
            *,
            process_rank: int = 0,
            world_size: int = 1,
            is_main_process: bool = True,
            state: PartialState | None = None,
    ):
        """Build a per-sequence runtime tracker around a trained model.

        The tracker starts in state ``UNINITIALIZED``; ``set_sequence_name``
        must be called before ``update`` (which raises otherwise).

        Several settings can be overridden at runtime through environment
        variables (``BUFFER_CAPACITY``, ``BUFFER_PROMOTE_STEPS``, ``LWG_*``,
        ``BUFFER_GATE_*`` — see the ``os.getenv`` calls below).

        Args:
            model: detection/tracking model; put into eval mode, and halved
                when ``dtype`` is ``torch.float16``. Must expose
                ``num_id_vocabulary`` via ``get_model``.
            sequence_hw: (height, width) of the sequence frames, used to
                build the box un-normalization tensor (W, H, W, H order).
            use_sigmoid: use sigmoid instead of softmax over ID logits.
            assignment_protocol: one of "hungarian", "id-max", "object-max",
                "object-priority", "id-priority" (case-insensitive).
            miss_tolerance: frames a track may stay unmatched before removal.
            det_thresh: detection score threshold.
            newborn_thresh: extra score threshold for newborn detections.
            id_thresh: ID score threshold.
            area_thresh: minimum box area in pixels.
            only_detr: run detection only (all detections become newborns).
            dtype: inference dtype; only float32 and float16 are supported.
            use_lwg / lwg_model / lwg_main_th / lwg_buf_th: learned write-gate
                model and its main/buffer probability thresholds.
            buffer_capacity / buffer_promote_steps: quarantine-buffer sizing.
            use_buffer_gate / buffer_gate_model / buffer_gate_thresh /
                buffer_history_window: optional secondary gate over buffered
                tracks (only active when a gate model is provided).
            export_lwg_features / lwg_feature_dump_dir: dump LWG features.
            export_buffer_gate_features / buffer_gate_dump_dir: dump buffer
                gate training data (export requires a dump dir).
            process_rank / world_size / is_main_process: distributed info;
                overridden by ``state`` when it is given.
            state: optional accelerate ``PartialState`` carrying the
                authoritative distributed info.

        Raises:
            NotImplementedError: for unsupported ``dtype`` values.
            AssertionError: for an unknown ``assignment_protocol``.
        """
        self.model = model
        self.model.eval()

        # A PartialState, when provided, is authoritative for distributed info.
        if state is not None:
            process_rank = state.process_index
            world_size = state.num_processes
            is_main_process = state.is_main_process

        self.process_rank = process_rank
        self.world_size = world_size
        self.is_main_process = is_main_process
        self._state_obj = state
        # Lifecycle flag; flipped to "READY" by set_sequence_name().
        self._state = "UNINITIALIZED"
        self._skip_sequence = False

        self.dtype = dtype

        # For FP16: only float32 and float16 are supported.
        if self.dtype != torch.float32:
            if self.dtype == torch.float16:
                self.model.half()
            else:
                raise NotImplementedError(f"Unsupported dtype {self.dtype}.")

        self.use_sigmoid = use_sigmoid
        self.assignment_protocol = assignment_protocol.lower()
        self.miss_tolerance = miss_tolerance
        self.det_thresh = det_thresh
        self.newborn_thresh = newborn_thresh
        self.id_thresh = id_thresh
        self.area_thresh = area_thresh
        self.only_detr = only_detr
        self.num_id_vocabulary = get_model(model).num_id_vocabulary

        # Check for the legality of settings:
        assert self.assignment_protocol in ["hungarian", "id-max", "object-max", "object-priority", "id-priority"], \
            f"Assignment protocol {self.assignment_protocol} is not supported."
        # Scale factor to turn normalized boxes into pixel coords: (W, H, W, H).
        self.bbox_unnorm = torch.tensor(
            [sequence_hw[1], sequence_hw[0], sequence_hw[1], sequence_hw[0]],
            dtype=dtype,
            device=distributed_device(),
        )

        # Trajectory fields:
        self.next_id = 0                 # next externally-visible track id
        self.id_label_to_id = {}         # id-vocabulary label -> track id
        # LRU-ordered pool of id-vocabulary labels; front = least recently used.
        self.id_queue = OrderedSet()
        for i in range(self.num_id_vocabulary):
            self.id_queue.add(i)
        # Per-frame trajectory history, all shaped (T, N, ...), initially empty.
        self.trajectory_features = torch.zeros(
            (0, 0, 256), dtype=dtype, device=distributed_device(),
        )
        self.trajectory_boxes = torch.zeros(
            (0, 0, 4), dtype=dtype, device=distributed_device(),
        )
        self.trajectory_id_labels = torch.zeros(
            (0, 0), dtype=torch.int64, device=distributed_device(),
        )
        self.trajectory_times = torch.zeros(
            (0, 0), dtype=dtype, device=distributed_device(),
        )
        self.trajectory_masks = torch.zeros(
            (0, 0), dtype=torch.bool, device=distributed_device(),
        )

        self.current_track_results = {}
        self._is_initialized = False

        # LWG & buffer states:
        self.use_lwg = use_lwg
        self.lwg_model = lwg_model
        self.lwg_main_th = lwg_main_th
        self.lwg_buf_th = lwg_buf_th
        # Environment variables override the constructor defaults.
        self.buffer_capacity = int(os.getenv("BUFFER_CAPACITY", str(buffer_capacity)))
        self.buffer_promote_steps = int(os.getenv("BUFFER_PROMOTE_STEPS", str(buffer_promote_steps)))

        # Cold-start / adaptive-threshold knobs for the write gate.
        self.lwg_cold_start_steps = int(os.getenv("LWG_COLD_START_STEPS", "100"))
        self.lwg_dynamic_window = int(os.getenv("LWG_DYNAMIC_WINDOW", "50"))
        self.lwg_low_p_mean = float(os.getenv("LWG_LOW_P_MEAN", "0.08"))
        self.lwg_min_write_rate = float(os.getenv("LWG_MIN_WRITE_RATE", "0.2"))
        self.lwg_safe_buf_th = float(os.getenv("LWG_SAFE_BUF_TH", "0.03"))
        # Rolling windows over recent mean write probabilities / write rates.
        self._p_window: deque[float] = deque(maxlen=self.lwg_dynamic_window)
        self._write_window: deque[float] = deque(maxlen=self.lwg_dynamic_window)

        # Best-effort: move the LWG model to eval mode on the right device.
        if self.use_lwg and self.lwg_model is not None:
            try:
                self.lwg_model.eval()
                self.lwg_model.to(distributed_device())
            except Exception:
                pass

        self.export_lwg_features = export_lwg_features
        self.lwg_feature_dump_dir = lwg_feature_dump_dir
        # Buffer-gate export is only enabled when a dump dir is given.
        self.export_buffer_gate_features = export_buffer_gate_features and buffer_gate_dump_dir is not None
        self.buffer_gate_dump_dir = buffer_gate_dump_dir
        self.sequence_name = None
        self.timestep = 0
        if self.export_lwg_features and self.lwg_feature_dump_dir is not None:
            os.makedirs(self.lwg_feature_dump_dir, exist_ok=True)
        if self.export_buffer_gate_features and self.buffer_gate_dump_dir is not None:
            os.makedirs(self.buffer_gate_dump_dir, exist_ok=True)

        # Per-sequence LWG statistics (reset again in set_sequence_name).
        self.lwg_stats = {
            "total": 0,
            "buffered": 0,
            "written": 0,
            "p_sum": 0.0,
            "p_min": None,
            "p_max": None,
            "below_buf": 0,
            "below_main": 0,
        }
        self._p_window.clear()
        self._write_window.clear()
        # EWMA of the mean write probability; seeded on first batch.
        self._ewma_p_write = None
        self.lwg_adaptive_alpha = float(os.getenv("LWG_ADAPTIVE_ALPHA", "0.1"))
        self.lwg_adaptive_gain = float(os.getenv("LWG_ADAPTIVE_GAIN", "0.2"))
        # Export buffers are flushed once they reach the flush size.
        self._export_buffer = []
        self._export_flush_size = int(os.getenv("LWG_EXPORT_FLUSH_SIZE", "256"))
        self._lwg_debug: list[dict] = []
        # Last similarity seen per predicted id label (for delta features).
        self._lwg_last_sim: dict[int, float] = {}
        self._buffer_export_buffer = []
        self._buffer_export_flush_size = int(os.getenv("BUFFER_GATE_EXPORT_FLUSH_SIZE", "256"))
        # CSV header for buffer-gate feature dumps.
        self._buffer_export_header = [
            "seq",
            "t",
            "id_label",
            "label",
            "len",
            "p_mean",
            "p_min",
            "p_max",
            "p_last",
            "iou_mean",
            "iou_min",
            "iou_max",
            "iou_last",
            "delta_mean",
            "delta_min",
            "delta_max",
            "delta_last",
            "margin_mean",
            "s_det_mean",
            "s_det_last",
            "area_mean",
            "aspect_mean",
        ]
        # The buffer gate is only active when its model is provided.
        self.use_buffer_gate = use_buffer_gate and (buffer_gate_model is not None)
        self.buffer_gate_model = buffer_gate_model if self.use_buffer_gate else None
        self.buffer_gate_thresh = float(os.getenv("BUFFER_GATE_THRESH", str(buffer_gate_thresh)))
        self.buffer_history_window = max(1, int(os.getenv("BUFFER_HISTORY_WINDOW", str(buffer_history_window))))
        # Quarantine / history bookkeeping keyed by id label.
        self._quarantine_state: dict[int, dict] = {}
        self._buffer_records: dict[int, deque[dict]] = {}
        self._buffer_gate_device = distributed_device()
        if self.use_buffer_gate and self.buffer_gate_model is not None:
            # Best-effort: place the gate model and remember its real device.
            try:
                self.buffer_gate_model.eval()
                self.buffer_gate_model.to(distributed_device())
                try:
                    self._buffer_gate_device = next(self.buffer_gate_model.parameters()).device
                except StopIteration:
                    self._buffer_gate_device = distributed_device()
            except Exception:
                pass
        return

    @torch.no_grad()
    def update(self, image):
        """Advance the tracker by one frame.

        Runs detection, predicts ID labels for the detections (unless
        ``only_detr``), filters low-confidence newborns, assigns fresh ID
        labels to the surviving newborns, publishes the frame's results in
        ``self.current_track_results`` and updates the trajectory history.

        Args:
            image: the current frame, forwarded to the model's DETR part.

        Raises:
            RuntimeError: if called before ``set_sequence_name`` marked the
                tracker READY, or if duplicate ID labels are produced
                (an internal invariant violation).
        """
        if self._state != "READY":
            raise RuntimeError(f"RuntimeTracker.update called while state={self._state}")
        detr_out = self.model(frames=image, part="detr")
        scores, categories, boxes, output_embeds = self._get_activate_detections(detr_out=detr_out)
        if self.only_detr:
            # Detection-only mode: every detection is marked as a newborn.
            id_pred_labels = self.num_id_vocabulary * torch.ones(boxes.shape[0], dtype=torch.int64, device=boxes.device)
        else:
            id_pred_labels = self._get_id_pred_labels(boxes=boxes, output_embeds=output_embeds)
        # Filter out illegal newborn detections (newborns need score > newborn_thresh):
        keep_idxs = (id_pred_labels != self.num_id_vocabulary) | (scores > self.newborn_thresh)
        scores = scores[keep_idxs]
        categories = categories[keep_idxs]
        boxes = boxes[keep_idxs]
        output_embeds = output_embeds[keep_idxs]
        id_pred_labels = id_pred_labels[keep_idxs]
        id_pred_labels_for_lwg = id_pred_labels.clone()

        # Refresh the id_queue with currently-active labels BEFORE assigning
        # new ones, so recycled labels stay unique within this frame:
        n_activate_id_labels = 0
        n_newborn_targets = 0
        for label in id_pred_labels.tolist():
            if label != self.num_id_vocabulary:
                n_activate_id_labels += 1
                self.id_queue.add(label)
            else:
                n_newborn_targets += 1

        # Cap the number of newborn instances by the number of free ID labels:
        n_remaining_ids = len(self.id_queue) - n_activate_id_labels
        if n_newborn_targets > n_remaining_ids:
            keep_idxs = torch.ones(len(id_pred_labels), dtype=torch.bool, device=id_pred_labels.device)
            newborn_idxs = (id_pred_labels == self.num_id_vocabulary)
            # BUGFIX: the tensor written through the boolean mask must contain
            # one entry per *newborn* (newborn_idxs.sum()), not one entry per
            # detection, otherwise `keep_idxs[newborn_idxs] = ...` raises a
            # shape-mismatch error. Keep the first n_remaining_ids newborns.
            newborn_keep_idxs = torch.ones(n_newborn_targets, dtype=torch.bool, device=newborn_idxs.device)
            newborn_keep_idxs[n_remaining_ids:] = False
            keep_idxs[newborn_idxs] = newborn_keep_idxs
            scores = scores[keep_idxs]
            categories = categories[keep_idxs]
            boxes = boxes[keep_idxs]
            output_embeds = output_embeds[keep_idxs]
            id_pred_labels = id_pred_labels[keep_idxs]
            id_pred_labels_for_lwg = id_pred_labels_for_lwg[keep_idxs]

        # Assign new id labels to the newborns:
        id_labels = self._assign_newborn_id_labels(pred_id_labels=id_pred_labels)

        if len(torch.unique(id_labels)) != len(id_labels):
            # Duplicate labels would corrupt the trajectory state; fail loudly
            # instead of killing the whole process with exit(-1).
            raise RuntimeError(f"Duplicate ID labels produced in frame: {id_labels.tolist()}")

        # Publish this frame's results (boxes converted to pixel xywh):
        self.current_track_results = {
            "score": scores,
            "category": categories,
            "bbox": box_cxcywh_to_xywh(boxes) * self.bbox_unnorm,
            "id": torch.tensor(
                [self.id_label_to_id[label] for label in id_labels.tolist()], dtype=torch.int64,
            ),
        }

        # Move every label used this frame to the back of the id_queue:
        for label in id_labels.tolist():
            self.id_queue.add(label)

        # Append the frame to the trajectory history:
        self._update_trajectory_infos(
            boxes=boxes,
            output_embeds=output_embeds,
            id_labels=id_labels,
            id_pred_labels_for_lwg=id_pred_labels_for_lwg,
        )

        # Drop tracks that have been missing for too long:
        self._filter_out_inactive_tracks()
        return

    def get_track_results(self):
        """Return the results dict produced by the most recent `update` call."""
        latest_results = self.current_track_results
        return latest_results

    def set_sequence_name(self, name: str, is_fake: bool = False):
        """Reset all per-sequence state and mark the tracker READY for `name`.

        Args:
            name: identifier of the upcoming sequence.
            is_fake: when True, trajectory updates for this sequence are
                skipped (placeholder sequence).
        """
        # Sequence identity and lifecycle flags.
        self.sequence_name = name
        self._state = "READY"
        self._skip_sequence = is_fake
        self.timestep = 0
        # Rolling LWG windows and the EWMA restart from scratch.
        self._p_window.clear()
        self._write_window.clear()
        self._ewma_p_write = None
        # Debug / export accumulators.
        self._lwg_debug = []
        self._export_buffer.clear()
        self._buffer_export_buffer.clear()
        # Buffer-gate bookkeeping.
        self._buffer_records.clear()
        self._quarantine_state.clear()
        # Fresh LWG statistics counters.
        self.lwg_stats = dict(
            total=0,
            buffered=0,
            written=0,
            p_sum=0.0,
            p_min=None,
            p_max=None,
            below_buf=0,
            below_main=0,
        )
        self._lwg_last_sim.clear()
        # Make sure the dump directory exists before any export happens.
        if self.export_buffer_gate_features and self.buffer_gate_dump_dir is not None:
            os.makedirs(self.buffer_gate_dump_dir, exist_ok=True)

    def _get_activate_detections(self, detr_out: dict):
        """Select the detections that clear the score and area thresholds.

        Args:
            detr_out: DETR output dict with "pred_logits", "pred_boxes" and
                "outputs" entries; only batch index 0 is used.

        Returns:
            A tuple (scores, categories, boxes, output_embeds) restricted to
            detections with score > det_thresh and pixel area > area_thresh.
        """
        class_logits = detr_out["pred_logits"][0]
        pred_boxes = detr_out["pred_boxes"][0]
        embeds = detr_out["outputs"][0]
        # Per-query confidence: max sigmoid score over classes.
        confidences, categories = torch.max(class_logits.sigmoid(), dim=-1)
        # Box area in pixels (boxes are normalized; bbox_unnorm is [W, H, W, H]).
        pixel_area = pred_boxes[:, 2] * self.bbox_unnorm[2] * pred_boxes[:, 3] * self.bbox_unnorm[3]
        # Keep only detections passing both thresholds.
        selected = (confidences > self.det_thresh) & (pixel_area > self.area_thresh)
        return (
            confidences[selected],
            categories[selected],
            pred_boxes[selected],
            embeds[selected],
        )

    def _get_id_pred_labels(self, boxes: torch.Tensor, output_embeds: torch.Tensor):
        """Predict an ID label for each current detection.

        Runs trajectory modeling plus the ID decoder over the stored history
        and the current detections, then converts the ID scores into labels
        using the configured assignment protocol.

        Returns:
            Int64 tensor of ID labels; `self.num_id_vocabulary` marks a
            newborn (unmatched) detection.
        """
        if self.trajectory_features.shape[0] == 0:
            # No history yet: every detection is a newborn.
            return self.num_id_vocabulary * torch.ones(boxes.shape[0], dtype=torch.int64, device=boxes.device)

        device = distributed_device()
        n_objects = output_embeds.shape[0]
        # Current-frame tensors get a leading time axis of length 1.
        cur_features = output_embeds[None, ...]     # (T=1, N, ...)
        cur_boxes = boxes[None, ...]                # (T=1, N, 4)
        cur_masks = torch.zeros((1, n_objects), dtype=torch.bool, device=device)
        cur_times = self.trajectory_times.shape[0] * torch.ones(
            (1, n_objects), dtype=torch.int64, device=device,
        )
        # Pack history + current detections with the (batch, group) axes the
        # model expects.
        seq_info = {
            "trajectory_features": self.trajectory_features[None, None, ...],
            "trajectory_boxes": self.trajectory_boxes[None, None, ...],
            "trajectory_id_labels": self.trajectory_id_labels[None, None, ...],
            "trajectory_times": self.trajectory_times[None, None, ...],
            "trajectory_masks": self.trajectory_masks[None, None, ...],
            "unknown_features": cur_features[None, None, ...],
            "unknown_boxes": cur_boxes[None, None, ...],
            "unknown_masks": cur_masks[None, None, ...],
            "unknown_times": cur_times[None, None, ...],
        }
        seq_info = self.model(seq_info=seq_info, part="trajectory_modeling")
        id_logits, _, _ = self.model(seq_info=seq_info, part="id_decoder")
        id_logits = id_logits[0, 0, 0]
        # Softmax by default, sigmoid when configured.
        id_scores = id_logits.sigmoid() if self.use_sigmoid else id_logits.softmax(dim=-1)
        # Dispatch to the configured assignment protocol.
        protocol = self.assignment_protocol
        if protocol == "hungarian":
            id_labels = self._hungarian_assignment(id_scores=id_scores)
        elif protocol == "object-max":
            id_labels = self._object_max_assignment(id_scores=id_scores)
        elif protocol == "id-max":
            id_labels = self._id_max_assignment(id_scores=id_scores)
        else:
            # "object-priority"/"id-priority" pass the constructor check but
            # are not implemented here.
            raise NotImplementedError
        return torch.tensor(id_labels, dtype=torch.int64, device=device)

    def _assign_newborn_id_labels(self, pred_id_labels: torch.Tensor):
        """Replace newborn placeholders with fresh ID labels from the queue.

        Recycled labels are scrubbed from the trajectory history and from the
        label->track-id mapping before being handed out again.

        Args:
            pred_id_labels: predicted labels; entries equal to
                `self.num_id_vocabulary` mark newborns and are overwritten
                in place.

        Returns:
            The (mutated) `pred_id_labels` tensor with all newborns assigned.
        """
        newborn_mask = pred_id_labels == self.num_id_vocabulary
        n_newborns = int(newborn_mask.sum().item())
        if n_newborns == 0:
            return pred_id_labels

        # Oldest (least recently used) labels sit at the front of the queue.
        recycled = list(self.id_queue)[:n_newborns]
        newborn_id_labels = torch.tensor(
            recycled, dtype=torch.int64, device=distributed_device(),
        )

        # Purge stale trajectory columns / id mappings for the recycled labels.
        stale = torch.zeros(
            self.trajectory_id_labels.shape[1], dtype=torch.bool, device=distributed_device(),
        )
        for label in recycled:
            if self.trajectory_id_labels.shape[0] > 0:
                stale |= (self.trajectory_id_labels[0] == label)
            self.id_label_to_id.pop(label, None)
        keep = ~stale
        self.trajectory_features = self.trajectory_features[:, keep]
        self.trajectory_boxes = self.trajectory_boxes[:, keep]
        self.trajectory_id_labels = self.trajectory_id_labels[:, keep]
        self.trajectory_times = self.trajectory_times[:, keep]
        self.trajectory_masks = self.trajectory_masks[:, keep]

        # Hand the labels out and register brand-new track ids for them.
        pred_id_labels[newborn_mask] = newborn_id_labels
        for label in recycled:
            self.id_label_to_id[label] = self.next_id
            self.next_id += 1
        return pred_id_labels

    def _update_trajectory_infos(
        self,
        boxes: torch.Tensor,
        output_embeds: torch.Tensor,
        id_labels: torch.Tensor,
        id_pred_labels_for_lwg: torch.Tensor | None = None,
    ):
        self.timestep += 1
        if self._skip_sequence:
            self._p_window.clear()
            self._write_window.clear()
            return

        # If LWG enabled: decision to buffer or accept (with cold-start handling and HAM modulation)
        self._lwg_debug = []
        if self.use_lwg and self.lwg_model is not None and boxes.numel() > 0:
            try:
                with torch.no_grad():
                    device = boxes.device
                    H, W = int(self.bbox_unnorm[1].item()), int(self.bbox_unnorm[0].item())
                    area_img = float(H * W)
                    # Build last prototypes map
                    proto_map = {}
                    if self.trajectory_features.shape[0] > 0:
                        last_features = self.trajectory_features[-1]
                        last_masks = self.trajectory_masks[-1]
                        last_ids = self.trajectory_id_labels[-1]
                        for i in range(last_features.shape[0]):
                            if not last_masks[i].item():
                                proto_map[int(last_ids[i].item())] = last_features[i]
                    num_objects = boxes.shape[0]
                    feats = torch.zeros((num_objects, 7), dtype=torch.float32, device=device)
                    features_list = []
                    feature_meta = []
                    debug_info = []
                    if id_pred_labels_for_lwg is not None and len(id_pred_labels_for_lwg) > 0:
                        pred_labels_for_features = id_pred_labels_for_lwg.clone().to(device)
                    else:
                        pred_labels_for_features = torch.empty((0,), dtype=torch.int64, device=device)
                    if num_objects > 0:
                        bbox_scale = self.bbox_unnorm.to(device)
                        xywh = box_cxcywh_to_xywh(boxes) * bbox_scale
                        w_pix = xywh[:, 2].clamp(min=1e-6)
                        h_pix = xywh[:, 3].clamp(min=1e-6)
                        area_ratio_tensor = (w_pix * h_pix / (area_img + 1e-6)).clamp(min=0.0)
                        aspect_ratio_tensor = (w_pix / h_pix).clamp(min=0.0)
                        scores_tensor = self.current_track_results.get("score", torch.ones((num_objects,), device=device, dtype=torch.float32))
                        scores_tensor = torch.nan_to_num(scores_tensor.to(device=device, dtype=torch.float32), nan=0.0, posinf=1.0, neginf=0.0)
                        last_boxes_abs = None
                        last_masks_latest = None
                        if self.trajectory_boxes.shape[0] > 0:
                            last_boxes = self.trajectory_boxes[-1].to(device)
                            last_masks_latest = self.trajectory_masks[-1]
                            if last_boxes.shape[0] > 0:
                                last_boxes_abs = box_cxcywh_to_xywh(last_boxes) * bbox_scale
                        for idx in range(num_objects):
                            s_det = float(scores_tensor[idx].item())
                            sim1 = 0.0
                            sim2 = 0.0
                            margin = 0.0
                            if proto_map:
                                cur = output_embeds[idx]
                                cur = cur / (cur.norm(p=2) + 1e-6)
                                sims = []
                                for _, v in proto_map.items():
                                    v_n = v / (v.norm(p=2) + 1e-6)
                                    sims.append(float((cur * v_n).sum().item()))
                                sims.sort(reverse=True)
                                if len(sims) > 0:
                                    sim1 = sims[0]
                                if len(sims) > 1:
                                    sim2 = sims[1]
                                margin = sim1 - sim2
                            if not math.isfinite(sim1):
                                sim1 = 0.0
                            if not math.isfinite(margin):
                                margin = 0.0
                            area_ratio_val = float(area_ratio_tensor[idx].item())
                            aspect_ratio_val = float(aspect_ratio_tensor[idx].item())
                            best_iou = 0.0
                            if last_boxes_abs is not None and last_masks_latest is not None and last_boxes_abs.shape[0] > 0:
                                cb = xywh[idx]
                                ax1 = float(cb[0].item())
                                ay1 = float(cb[1].item())
                                aw = float(cb[2].item())
                                ah = float(cb[3].item())
                                ax2 = ax1 + aw
                                ay2 = ay1 + ah
                                for j in range(last_boxes_abs.shape[0]):
                                    if last_masks_latest[j].item():
                                        continue
                                    lb = last_boxes_abs[j]
                                    bx1 = float(lb[0].item())
                                    by1 = float(lb[1].item())
                                    bw = float(lb[2].item())
                                    bh = float(lb[3].item())
                                    bx2 = bx1 + bw
                                    by2 = by1 + bh
                                    ix1 = max(ax1, bx1)
                                    iy1 = max(ay1, by1)
                                    ix2 = min(ax2, bx2)
                                    iy2 = min(ay2, by2)
                                    iw = max(0.0, ix2 - ix1)
                                    ih = max(0.0, iy2 - iy1)
                                    inter = iw * ih
                                    ua = aw * ah + bw * bh - inter + 1e-6
                                    best_iou = max(best_iou, inter / ua)
                            best_iou = max(0.0, min(1.0, best_iou))
                            id_pred = int(pred_labels_for_features[idx].item()) if idx < len(pred_labels_for_features) else -1
                            prev_sim = self._lwg_last_sim.get(id_pred, None)
                            prev_sim_val = prev_sim if prev_sim is not None else sim1
                            delta_sim1 = sim1 - prev_sim_val
                            if not math.isfinite(delta_sim1):
                                delta_sim1 = 0.0
                            feats[idx, 0] = s_det
                            feats[idx, 1] = sim1
                            feats[idx, 2] = margin
                            feats[idx, 3] = area_ratio_val
                            feats[idx, 4] = aspect_ratio_val
                            feats[idx, 5] = best_iou
                            feats[idx, 6] = delta_sim1
                            features_list.append([s_det, sim1, margin, area_ratio_val, aspect_ratio_val, best_iou, delta_sim1])
                            feature_meta.append({
                                "id_pred": id_pred,
                                "sim1": sim1,
                                "prev_sim1": prev_sim_val,
                                "delta_sim1": delta_sim1,
                                "best_iou": best_iou,
                                "margin": margin,
                                "area_ratio": area_ratio_val,
                                "aspect_ratio": aspect_ratio_val,
                                "s_det": s_det,
                            })
                    try:
                        model_device = next(self.lwg_model.parameters()).device
                        if feats.device != model_device:
                            feats = feats.to(model_device)
                    except StopIteration:
                        model_device = feats.device
                    cold_start = self.timestep < self.lwg_cold_start_steps
                    if cold_start:
                        p_write = torch.ones((num_objects,), dtype=torch.float32, device=feats.device)
                        effective_buf_th = -1.0
                        fallback_reason = "cold_start"
                    else:
                        expected_dim = feats.shape[1]
                        try:
                            if hasattr(self.lwg_model, "net") and len(self.lwg_model.net) > 0 and hasattr(self.lwg_model.net[0], "in_features"):
                                expected_dim = int(self.lwg_model.net[0].in_features)
                            elif hasattr(self.lwg_model, "in_dim"):
                                expected_dim = int(getattr(self.lwg_model, "in_dim"))
                        except Exception:
                            expected_dim = feats.shape[1]
                        if feats.shape[1] != expected_dim:
                            if feats.shape[1] > expected_dim:
                                feats_input = feats[:, :expected_dim]
                            else:
                                pad = torch.zeros((feats.shape[0], expected_dim - feats.shape[1]), dtype=feats.dtype, device=feats.device)
                                feats_input = torch.cat([feats, pad], dim=1)
                        else:
                            feats_input = feats
                        p_write = self.lwg_model(feats_input).float().view(-1)
                        p_write = torch.nan_to_num(p_write, nan=0.0, posinf=1.0, neginf=0.0).clamp_(0.0, 1.0)
                        mean_p = float(p_write.mean().item()) if p_write.numel() > 0 else 0.0
                        if self._ewma_p_write is None:
                            self._ewma_p_write = mean_p
                        else:
                            self._ewma_p_write = (1.0 - self.lwg_adaptive_alpha) * self._ewma_p_write + self.lwg_adaptive_alpha * mean_p
                        adaptive_target = self.lwg_buf_th + self.lwg_adaptive_gain * (self._ewma_p_write - self.lwg_buf_th)
                        adaptive_target = max(self.lwg_safe_buf_th, min(1.0, adaptive_target))
                        recent_p_mean = (sum(self._p_window) / len(self._p_window)) if len(self._p_window) > 0 else None
                        recent_write_rate = (sum(self._write_window) / len(self._write_window)) if len(self._write_window) > 0 else None
                        effective_buf_th = adaptive_target
                        fallback_reason = "nominal"
                        if recent_p_mean is not None and recent_write_rate is not None:
                            if (recent_p_mean < self.lwg_low_p_mean) or (recent_write_rate < self.lwg_min_write_rate):
                                effective_buf_th = min(effective_buf_th, self.lwg_safe_buf_th)
                                fallback_reason = "low_stats"
                    effective_buf_th = float(effective_buf_th)
                    if cold_start:
                        effective_buf_th = -1.0
                    mask_to_buffer = (p_write < effective_buf_th) if effective_buf_th >= 0 else torch.zeros_like(p_write, dtype=torch.bool)
                    n_total = feats.shape[0]
                    n_buf = int(mask_to_buffer.to(torch.int64).sum().item()) if n_total > 0 else 0
                    n_write = n_total - n_buf
                    self.lwg_stats["total"] += n_total
                    self.lwg_stats["buffered"] += n_buf
                    self.lwg_stats["written"] += n_write
                    if n_total > 0:
                        sum_p = float(p_write.sum().item())
                        self.lwg_stats["p_sum"] += sum_p
                        p_min_val = float(p_write.min().item())
                        p_max_val = float(p_write.max().item())
                        self.lwg_stats["p_min"] = p_min_val if self.lwg_stats["p_min"] is None else min(self.lwg_stats["p_min"], p_min_val)
                        self.lwg_stats["p_max"] = p_max_val if self.lwg_stats["p_max"] is None else max(self.lwg_stats["p_max"], p_max_val)
                        self.lwg_stats["below_buf"] += int((p_write < self.lwg_buf_th).to(torch.int64).sum().item())
                        self.lwg_stats["below_main"] += int((p_write < self.lwg_main_th).to(torch.int64).sum().item())
                        self._p_window.append(float(p_write.mean().item()))
                        self._write_window.append(n_write / float(n_total))
                    if mask_to_buffer.numel() > 0:
                        keep, quarantine_events = self._apply_quarantine(
                            mask_to_buffer=mask_to_buffer,
                            id_labels=id_labels,
                            device=output_embeds.device,
                        )
                    else:
                        keep = torch.ones_like(mask_to_buffer, dtype=torch.bool, device=mask_to_buffer.device)
                        quarantine_events = []
                    buffer_gate_overrides = {}
                    buffer_training_samples: dict[int, dict] = {}
                    if quarantine_events:
                        for event in quarantine_events:
                            idx = event["idx"]
                            id_lbl = event["id_label"]
                            status = event["status"]
                            if status == "reset":
                                history_prev = self._buffer_records.pop(id_lbl, None)
                                if history_prev:
                                    agg_features = self._aggregate_buffer_history(list(history_prev))
                                    buffer_training_samples[idx] = {
                                        "id_label": id_lbl,
                                        "features": agg_features,
                                        "forced_label": 0,
                                    }
                                continue
                            sample = self._build_buffer_sample(
                                idx=idx,
                                feature_meta=feature_meta,
                                p_write=p_write,
                                event_status=status,
                            )
                            history = self._update_buffer_history(id_lbl, sample, status)
                            if status == "promote" and history is not None:
                                agg_features = self._aggregate_buffer_history(history)
                                buffer_training_samples[idx] = {
                                    "id_label": id_lbl,
                                    "features": agg_features,
                                    "forced_label": None,
                                }
                                accept = True
                                if self.use_buffer_gate and self.buffer_gate_model is not None:
                                    accept = self._buffer_gate_decision(agg_features)
                                if not accept:
                                    buffer_gate_overrides[idx] = False
                                else:
                                    buffer_gate_overrides[idx] = True
                    for idx in range(mask_to_buffer.numel()):
                        decision = not bool(mask_to_buffer[idx].item())
                        decision_reason = "accept" if decision else ("buffer" if fallback_reason == "nominal" else fallback_reason)
                        meta = feature_meta[idx] if idx < len(feature_meta) else {}
                        debug_info.append({
                            "p_write": float(p_write[idx].item()),
                            "effective_buf_th": float(effective_buf_th),
                            "decision_write": decision,
                            "decision_reason": decision_reason,
                            "best_iou": float(meta.get("best_iou", 0.0)),
                            "delta_sim1": float(meta.get("delta_sim1", 0.0)),
                            "prev_sim1": float(meta.get("prev_sim1", 0.0)),
                        })
                    if buffer_gate_overrides:
                        for idx, accepted in buffer_gate_overrides.items():
                            if idx < len(debug_info):
                                if not accepted:
                                    debug_info[idx]["decision_write"] = False
                                    debug_info[idx]["decision_reason"] = "buffer_gate_drop"
                                else:
                                    debug_info[idx]["decision_reason"] = debug_info[idx].get("decision_reason", "accept")
                            if not accepted and idx < keep.numel():
                                keep[idx] = False
                    if buffer_training_samples:
                        for idx, sample in buffer_training_samples.items():
                            if sample.get("forced_label") is not None:
                                accepted = bool(sample["forced_label"])
                            else:
                                accepted = True
                                if idx < keep.numel():
                                    accepted = bool(keep[idx].item())
                            self._record_buffer_training_sample(
                                id_label=sample["id_label"],
                                features=sample["features"],
                                accepted=accepted,
                            )
                    if keep is not None and (~keep).any():
                        mask = keep.to(torch.bool).tolist()
                        output_embeds = output_embeds.clone()[keep]
                        boxes = boxes.clone()[keep]
                        id_labels = id_labels.clone()[keep]
                        debug_info = [info for info, m in zip(debug_info, mask) if m]
                        feature_meta = [meta for meta, m in zip(feature_meta, mask) if m]
                        if pred_labels_for_features.numel() > 0:
                            pred_labels_for_features = pred_labels_for_features[keep]
                    if feature_meta:
                        for meta in feature_meta:
                            id_pred = meta.get("id_pred", -1)
                            if id_pred is not None and id_pred >= 0:
                                self._lwg_last_sim[id_pred] = float(meta.get("sim1", 0.0))
                    self._lwg_debug = debug_info
            except Exception as exc:
                print(f"[RuntimeTracker][WARN] LWG decision failed: {exc}")
                self._lwg_debug = []
        else:
            self._lwg_debug = []

        if self.export_lwg_features and self.sequence_name is not None and self.lwg_feature_dump_dir is not None:
            try:
                self._export_lwg_features(
                    boxes=boxes,
                    output_embeds=output_embeds,
                    id_labels=id_labels,
                    scores=self.current_track_results.get("score", None),
                )
            except Exception:
                pass

        # 1. cut trajectory infos:
        self.trajectory_features = self.trajectory_features[-self.miss_tolerance + 2:, ...]
        self.trajectory_boxes = self.trajectory_boxes[-self.miss_tolerance + 2:, ...]
        self.trajectory_id_labels = self.trajectory_id_labels[-self.miss_tolerance + 2:, ...]
        self.trajectory_times = self.trajectory_times[-self.miss_tolerance + 2:, ...]
        self.trajectory_masks = self.trajectory_masks[-self.miss_tolerance + 2:, ...]
        # 2. find out all new instances:
        already_id_labels = set(self.trajectory_id_labels[0].tolist() if self.trajectory_id_labels.shape[0] > 0 else [])
        _id_labels = set(id_labels.tolist())
        newborn_id_labels = _id_labels - already_id_labels
        # 3. add newborn instances to trajectory infos:
        if len(newborn_id_labels) > 0:
            newborn_id_labels = torch.tensor(list(newborn_id_labels), dtype=torch.int64, device=distributed_device())
            _T = self.trajectory_id_labels.shape[0]
            _N = len(newborn_id_labels)
            _id_labels = einops.repeat(newborn_id_labels, 'n -> t n', t=_T)
            _boxes = torch.zeros((_T, _N, 4), dtype=self.dtype, device=distributed_device())
            _times = einops.repeat(
                torch.arange(_T, dtype=torch.int64, device=distributed_device()), 't -> t n', n=_N,
            )
            _features = torch.zeros(
                (_T, _N, 256), dtype=self.dtype, device=distributed_device(),
            )
            _masks = torch.ones((_T, _N), dtype=torch.bool, device=distributed_device())
            # 3.1. padding to trajectory infos:
            self.trajectory_id_labels = torch.cat([self.trajectory_id_labels, _id_labels], dim=1)
            self.trajectory_boxes = torch.cat([self.trajectory_boxes, _boxes], dim=1)
            self.trajectory_times = torch.cat([self.trajectory_times, _times], dim=1)
            self.trajectory_features = torch.cat([self.trajectory_features, _features], dim=1)
            self.trajectory_masks = torch.cat([self.trajectory_masks, _masks], dim=1)
        # 4. update trajectory infos:
        _N = self.trajectory_id_labels.shape[1]
        # Ensure the trajectory data structures have been initialized
        self._ensure_initialized(id_labels)
        current_id_labels = self.trajectory_id_labels[0] if self.trajectory_id_labels.shape[0] > 0 else id_labels
        current_features = torch.zeros((_N, 256), dtype=self.dtype, device=distributed_device())
        current_boxes = torch.zeros((_N, 4), dtype=self.dtype, device=distributed_device())
        current_times = self.trajectory_id_labels.shape[0] * torch.ones((_N,), dtype=torch.int64, device=distributed_device())
        current_masks = torch.ones((_N,), dtype=torch.bool, device=distributed_device())
        # 4.1. find out the same id labels (matching):
        indices = torch.eq(current_id_labels[:, None], id_labels[None, :]).nonzero(as_tuple=False)
        current_idxs = indices[:, 0]
        idxs = indices[:, 1]
        # 4.2. fill in the infos:
        current_id_labels[current_idxs] = id_labels[idxs]
        current_features[current_idxs] = output_embeds[idxs]
        current_boxes[current_idxs] = boxes[idxs]
        current_masks[current_idxs] = False
        # Zero out entries whose write was blocked, so no stale data is stored
        blocked_indices = current_masks.nonzero(as_tuple=False).view(-1)
        if blocked_indices.numel() > 0:
            current_features[blocked_indices] = 0
            current_boxes[blocked_indices] = 0
        # 4.3. write the results into the trajectory cache
        self.trajectory_features = torch.cat([self.trajectory_features, current_features[None, ...]], dim=0).contiguous()
        self.trajectory_boxes = torch.cat([self.trajectory_boxes, current_boxes[None, ...]], dim=0).contiguous()
        self.trajectory_id_labels = torch.cat([self.trajectory_id_labels, current_id_labels[None, ...]], dim=0).contiguous()
        self.trajectory_times = torch.cat([self.trajectory_times, current_times[None, ...]], dim=0).contiguous()
        self.trajectory_masks = torch.cat([self.trajectory_masks, current_masks[None, ...]], dim=0).contiguous()
        # 4.4. a hack implementation to fix "times":
        self.trajectory_times = einops.repeat(
            torch.arange(self.trajectory_times.shape[0], dtype=torch.int64, device=distributed_device()),
            't -> t n', n=self.trajectory_times.shape[1],
        ).contiguous().clone()
        return

    def close_sequence(self):
        self._flush_export_buffer()
        self._flush_buffer_gate_export_buffer()
        self._state = "CLOSED"
        self.sequence_name = None
        self._skip_sequence = False
        self._lwg_debug = []
        self._buffer_records.clear()
        self._quarantine_state.clear()

    def _filter_out_inactive_tracks(self):
        is_active = torch.sum((~self.trajectory_masks).to(torch.int64), dim=0) > 0
        self.trajectory_features = self.trajectory_features[:, is_active]
        self.trajectory_boxes = self.trajectory_boxes[:, is_active]
        self.trajectory_id_labels = self.trajectory_id_labels[:, is_active]
        self.trajectory_times = self.trajectory_times[:, is_active]
        self.trajectory_masks = self.trajectory_masks[:, is_active]
        return

    def _apply_quarantine(self, mask_to_buffer: torch.Tensor, id_labels: torch.Tensor, device) -> tuple[torch.Tensor, list[dict]]:
        keep = torch.ones(mask_to_buffer.shape[0], dtype=torch.bool, device=device)
        events: list[dict] = []
        if mask_to_buffer is None or mask_to_buffer.numel() == 0:
            return keep, events
        buffer_idxs = mask_to_buffer.nonzero(as_tuple=False).view(-1)
        for idx in buffer_idxs.tolist():
            id_lbl = int(id_labels[idx].item())
            state = self._quarantine_state.get(id_lbl, {"count": 0, "last_t": -1})
            if state.get("last_t", -1) == self.timestep - 1:
                state["count"] += 1
            else:
                state["count"] = 1
            state["last_t"] = self.timestep
            if state["count"] >= max(1, int(self.buffer_promote_steps)):
                keep[idx] = True
                state["count"] = 0
                events.append({"idx": idx, "id_label": id_lbl, "status": "promote"})
            else:
                keep[idx] = False
                events.append({"idx": idx, "id_label": id_lbl, "status": "buffer"})
            self._quarantine_state[id_lbl] = state
        not_buffer_idxs = (~mask_to_buffer).nonzero(as_tuple=False).view(-1)
        for idx in not_buffer_idxs.tolist():
            id_lbl = int(id_labels[idx].item())
            state = self._quarantine_state.get(id_lbl)
            if state is not None:
                state["count"] = 0
                state["last_t"] = self.timestep
                self._quarantine_state[id_lbl] = state
            events.append({"idx": idx, "id_label": id_lbl, "status": "reset"})
        stale_ids = [k for k, v in self._quarantine_state.items() if v.get("last_t", -1) < self.timestep - 10]
        for sid in stale_ids:
            self._quarantine_state.pop(sid, None)
        return keep, events

    def _build_buffer_sample(self, idx: int, feature_meta: list[dict], p_write: torch.Tensor, event_status: str):
        if event_status == "reset":
            return None
        meta = feature_meta[idx] if idx < len(feature_meta) else {}
        sample = {
            "p_write": float(p_write[idx].item()) if p_write is not None and idx < p_write.numel() else 1.0,
            "sim1": float(meta.get("sim1", 0.0)),
            "delta_sim1": float(meta.get("delta_sim1", 0.0)),
            "best_iou": float(meta.get("best_iou", 0.0)),
            "margin": float(meta.get("margin", 0.0)),
            "area_ratio": float(meta.get("area_ratio", 0.0)),
            "aspect_ratio": float(meta.get("aspect_ratio", 1.0)),
            "s_det": float(meta.get("s_det", 1.0)),
        }
        return sample

    def _update_buffer_history(self, id_label: int, sample: dict | None, status: str):
        if status == "reset":
            if id_label in self._buffer_records:
                del self._buffer_records[id_label]
            return None
        history = self._buffer_records.get(id_label)
        if history is None:
            history = deque(maxlen=self.buffer_history_window)
        if sample is not None:
            history.append(sample)
        if status == "buffer":
            self._buffer_records[id_label] = history
            return None
        if status == "promote":
            records = list(history)
            if id_label in self._buffer_records:
                del self._buffer_records[id_label]
            return records
        return None

    def _aggregate_buffer_history(self, samples: list[dict]) -> list[float]:
        if not samples:
            return [0.0] * 18
        n = len(samples)

        def stats(values):
            if not values:
                return 0.0, 0.0, 0.0, 0.0
            mean_v = sum(values) / len(values)
            return mean_v, min(values), max(values), values[-1]

        p_writes = [float(s.get("p_write", 1.0)) for s in samples]
        best_ious = [float(s.get("best_iou", 0.0)) for s in samples]
        deltas = [float(s.get("delta_sim1", 0.0)) for s in samples]
        margins = [float(s.get("margin", 0.0)) for s in samples]
        s_dets = [float(s.get("s_det", 1.0)) for s in samples]
        area_ratios = [float(s.get("area_ratio", 0.0)) for s in samples]
        aspect_ratios = [float(s.get("aspect_ratio", 1.0)) for s in samples]

        mean_pw, min_pw, max_pw, last_pw = stats(p_writes)
        mean_iou, min_iou, max_iou, last_iou = stats(best_ious)
        mean_delta, min_delta, max_delta, last_delta = stats(deltas)
        mean_margin = sum(margins) / len(margins) if margins else 0.0
        mean_s_det = sum(s_dets) / len(s_dets) if s_dets else 0.0
        last_s_det = s_dets[-1] if s_dets else mean_s_det
        mean_area = sum(area_ratios) / len(area_ratios) if area_ratios else 0.0
        mean_aspect = sum(aspect_ratios) / len(aspect_ratios) if aspect_ratios else 1.0

        return [
            float(n),
            float(mean_pw), float(min_pw), float(max_pw), float(last_pw),
            float(mean_iou), float(min_iou), float(max_iou), float(last_iou),
            float(mean_delta), float(min_delta), float(max_delta), float(last_delta),
            float(mean_margin),
            float(mean_s_det), float(last_s_det),
            float(mean_area),
            float(mean_aspect),
        ]

    def _buffer_gate_decision(self, features: list[float]) -> bool:
        if self.buffer_gate_model is None:
            return True
        expected_dim = len(features)
        try:
            if hasattr(self.buffer_gate_model, "net") and len(self.buffer_gate_model.net) > 0 and hasattr(self.buffer_gate_model.net[0], "in_features"):
                expected_dim = int(self.buffer_gate_model.net[0].in_features)
            elif hasattr(self.buffer_gate_model, "in_dim"):
                expected_dim = int(getattr(self.buffer_gate_model, "in_dim"))
        except Exception:
            expected_dim = len(features)
        vec = features[:expected_dim]
        if len(vec) < expected_dim:
            vec = vec + [0.0] * (expected_dim - len(vec))
        device = getattr(self, "_buffer_gate_device", distributed_device())
        tensor = torch.tensor(vec, dtype=torch.float32, device=device).view(1, -1)
        with torch.no_grad():
            prob = float(self.buffer_gate_model(tensor).item())
        return prob >= self.buffer_gate_thresh

    def _export_lwg_features(self, boxes: torch.Tensor, output_embeds: torch.Tensor, id_labels: torch.Tensor, scores: torch.Tensor | None):
        """
        Collect per-object features for LWG training/diagnostics and buffer them for batched writes.
        Fields: seq, t, idx, id_label, track_id, s_det, sim1, sim2, margin, track_age, delta_t,
                area_ratio, aspect_ratio, x, y, w, h, best_iou, delta_sim1,
                p_write, effective_buf_th, decision_write, decision_reason

        Best-effort: any failure is logged and swallowed so tracking continues.

        Args:
            boxes: (N, 4) normalized cxcywh boxes for the current frame's outputs.
            output_embeds: (N, C) per-object embeddings, aligned with ``boxes``.
            id_labels: (N,) assigned ID labels, aligned with ``boxes``.
            scores: optional (N,) detection scores; blank cells are written when absent.
        """
        if self.lwg_feature_dump_dir is None or self.sequence_name is None or self._skip_sequence:
            return
        try:
            import math
            rows = []
            T = self.trajectory_features.shape[0]
            # Prototype per track: the last-timestep feature of every ID that
            # was actually observed there (mask == False means observed).
            proto_map = {}
            if T > 0:
                last_features = self.trajectory_features[-1]
                last_masks = self.trajectory_masks[-1]
                last_ids = self.trajectory_id_labels[-1]
                for i in range(last_features.shape[0]):
                    if not last_masks[i].item():
                        proto_map[int(last_ids[i].item())] = last_features[i]
            debug_info = self._lwg_debug or []
            # NOTE(review): bbox_unnorm is indexed as [0] -> W, [1] -> H here,
            # i.e. a (W, H, ...) layout is assumed — confirm at the call site.
            H, W = int(self.bbox_unnorm[1].item()), int(self.bbox_unnorm[0].item())
            area_img = float(H * W)
            # Convert normalized cxcywh boxes to pixel-space xywh.
            xywh = box_cxcywh_to_xywh(boxes) * self.bbox_unnorm.to(boxes.device) if boxes.numel() > 0 else boxes
            for idx in range(boxes.shape[0]):
                cur_feat = output_embeds[idx]
                id_label = int(id_labels[idx].item())
                s_det = float(scores[idx].item()) if scores is not None else float("nan")
                # Tracker-assigned display ID, if present in the results dict.
                track_id = None
                try:
                    if "id" in self.current_track_results:
                        track_id = int(self.current_track_results["id"][idx].item())
                except Exception:
                    track_id = None
                # Cosine similarity against all prototypes: sim1/sim2 are the
                # two best matches, margin is their gap.
                sim1 = float("nan")
                sim2 = float("nan")
                margin = float("nan")
                if proto_map:
                    cur = cur_feat / (cur_feat.norm(p=2) + 1e-6)
                    sims = []
                    for _, v in proto_map.items():
                        v_n = v / (v.norm(p=2) + 1e-6)
                        sims.append(float((cur * v_n).sum().item()))
                    sims.sort(reverse=True)
                    if len(sims) > 0:
                        sim1 = sims[0]
                    if len(sims) > 1:
                        sim2 = sims[1]
                    if math.isfinite(sim1) and math.isfinite(sim2):
                        margin = sim1 - sim2
                # track_age: number of timesteps this ID was observed;
                # delta_t: timesteps since its most recent observation.
                ids_row = self.trajectory_id_labels[-1] if self.trajectory_id_labels.shape[0] > 0 else None
                track_age = 0
                delta_t = 0
                if ids_row is not None:
                    match_idx = (ids_row == id_labels[idx]).nonzero(as_tuple=False)
                    if match_idx.numel() > 0:
                        j = int(match_idx[0].item())
                        track_age = int((~self.trajectory_masks[:, j]).to(torch.int64).sum().item())
                        non_mask_idxs = (~self.trajectory_masks[:, j]).nonzero(as_tuple=False)
                        if non_mask_idxs.numel() > 0:
                            last_update_t = int(non_mask_idxs[-1].item())
                            delta_t = int((self.trajectory_masks.shape[0] - 1) - last_update_t)
                if boxes.numel() > 0:
                    x, y, w, h = xywh[idx]
                    w_pix = float(w.item())
                    h_pix = float(h.item())
                else:
                    x = y = w = h = torch.tensor(0.0, device=self.bbox_unnorm.device)
                    w_pix = h_pix = 0.0
                area_ratio = max(w_pix * h_pix / (area_img + 1e-6), 0.0) if area_img > 0 else 0.0
                aspect_ratio = w_pix / (h_pix + 1e-6)
                # Gate diagnostics for this object; NaN placeholders when the
                # LWG gate did not produce debug output for this frame.
                debug = debug_info[idx] if idx < len(debug_info) else {
                    "p_write": float("nan"),
                    "effective_buf_th": float("nan"),
                    "decision_write": True,
                    "decision_reason": "n/a",
                    "best_iou": float("nan"),
                    "delta_sim1": float("nan"),
                    "prev_sim1": float("nan"),
                }
                if "best_iou" not in debug:
                    debug["best_iou"] = float("nan")
                if "delta_sim1" not in debug:
                    debug["delta_sim1"] = float("nan")
                # One CSV row; column order matches the field list in the docstring.
                # Non-finite values are written as empty cells.
                row = [
                    self.sequence_name,
                    int(self.timestep),
                    int(idx),
                    id_label,
                    track_id if track_id is not None else "",
                    f"{s_det:.6f}" if math.isfinite(s_det) else "",
                    f"{sim1:.6f}" if math.isfinite(sim1) else "",
                    f"{sim2:.6f}" if math.isfinite(sim2) else "",
                    f"{margin:.6f}" if math.isfinite(margin) else "",
                    track_age,
                    delta_t,
                    f"{area_ratio:.6f}",
                    f"{aspect_ratio:.6f}",
                    f"{(x.item() if boxes.numel() > 0 else 0.0):.2f}",
                    f"{(y.item() if boxes.numel() > 0 else 0.0):.2f}",
                    f"{w_pix:.2f}",
                    f"{h_pix:.2f}",
                    f"{debug['best_iou']:.6f}" if math.isfinite(debug['best_iou']) else "",
                    f"{debug['delta_sim1']:.6f}" if math.isfinite(debug['delta_sim1']) else "",
                    f"{debug['p_write']:.6f}" if math.isfinite(debug['p_write']) else "",
                    f"{debug['effective_buf_th']:.6f}" if math.isfinite(debug['effective_buf_th']) else "",
                    int(debug['decision_write']),
                    debug['decision_reason'],
                ]
                rows.append(row)
            self._export_buffer.extend(rows)
            # Flush in batches to bound memory and amortize file I/O.
            if len(self._export_buffer) >= self._export_flush_size:
                self._flush_export_buffer()
        except Exception as exc:
            print(f"[RuntimeTracker][WARN] Failed to collect LWG features: {exc}")

    def _flush_export_buffer(self):
        if not self._export_buffer or self.lwg_feature_dump_dir is None or self.sequence_name is None:
            self._export_buffer.clear()
            return
        dump_path = os.path.join(
            self.lwg_feature_dump_dir, f"{self.sequence_name}_rank{self.process_rank}.csv"
        )
        try:
            import csv
            dir_path = os.path.dirname(dump_path) or "."
            os.makedirs(dir_path, exist_ok=True)
            with open(dump_path, "a", newline="") as f:
                writer = csv.writer(f)
                writer.writerows(self._export_buffer)
        except Exception as exc:
            print(f"[RuntimeTracker][WARN] Failed to flush LWG features: {exc}")
        finally:
            self._export_buffer.clear()

    def _record_buffer_training_sample(self, id_label: int, features: list[float], accepted: bool):
        if not self.export_buffer_gate_features or self.buffer_gate_dump_dir is None or self.sequence_name is None or self._skip_sequence:
            return
        try:
            row = [
                self.sequence_name,
                int(self.timestep),
                int(id_label),
                int(1 if accepted else 0),
            ]
            row.extend(f"{float(v):.6f}" for v in features)
            self._buffer_export_buffer.append(row)
            if len(self._buffer_export_buffer) >= self._buffer_export_flush_size:
                self._flush_buffer_gate_export_buffer()
        except Exception as exc:
            print(f"[RuntimeTracker][WARN] Failed to record buffer gate sample: {exc}")

    def _flush_buffer_gate_export_buffer(self):
        if not self._buffer_export_buffer or self.buffer_gate_dump_dir is None or self.sequence_name is None:
            self._buffer_export_buffer.clear()
            return
        dump_path = os.path.join(
            self.buffer_gate_dump_dir, f"{self.sequence_name}_buffer_gate_rank{self.process_rank}.csv"
        )
        try:
            import csv
            dir_path = os.path.dirname(dump_path) or "."
            os.makedirs(dir_path, exist_ok=True)
            file_exists = os.path.exists(dump_path) and os.path.getsize(dump_path) > 0
            with open(dump_path, "a", newline="") as f:
                writer = csv.writer(f)
                if not file_exists:
                    writer.writerow(self._buffer_export_header)
                writer.writerows(self._buffer_export_buffer)
        except Exception as exc:
            print(f"[RuntimeTracker][WARN] Failed to flush buffer gate features: {exc}")
        finally:
            self._buffer_export_buffer.clear()

    def _hungarian_assignment(self, id_scores: torch.Tensor):
        id_labels = list()  # final ID labels
        if len(id_scores) > 1:
            id_scores_newborn_repeat = id_scores[:, -1:].repeat(1, len(id_scores) - 1)
            id_scores = torch.cat((id_scores, id_scores_newborn_repeat), dim=-1)
        trajectory_id_labels_set = set(self.trajectory_id_labels[0].tolist())
        match_rows, match_cols = linear_sum_assignment(1 - id_scores.cpu())
        for _ in range(len(match_rows)):
            _id = match_cols[_]
            if _id not in trajectory_id_labels_set:
                id_labels.append(self.num_id_vocabulary)
            elif _id >= self.num_id_vocabulary:
                id_labels.append(self.num_id_vocabulary)
            elif id_scores[match_rows[_], _id] < self.id_thresh:
                id_labels.append(self.num_id_vocabulary)
            else:
                id_labels.append(_id)
        return id_labels

    def _object_max_assignment(self, id_scores: torch.Tensor):
        id_labels = list()  # final ID labels
        trajectory_id_labels_set = set(self.trajectory_id_labels[0].tolist())   # all tracked ID labels

        object_max_confs, object_max_id_labels = torch.max(id_scores, dim=-1)   # get the target ID labels and confs
        # Get the max confs of each ID label:
        id_max_confs = dict()
        for conf, id_label in zip(object_max_confs.tolist(), object_max_id_labels.tolist()):
            if id_label not in id_max_confs:
                id_max_confs[id_label] = conf
            else:
                # if conf == id_max_confs[id_label]:  # a very rare case
                #     conf = conf - 0.0001
                id_max_confs[id_label] = max(id_max_confs[id_label], conf)
        if self.num_id_vocabulary in id_max_confs:
            id_max_confs[self.num_id_vocabulary] = 0.0  # special token

        # Assign ID labels:
        for _ in range(len(object_max_id_labels)):
            if object_max_id_labels[_].item() not in trajectory_id_labels_set:         # not in tracked IDs -> newborn
                id_labels.append(self.num_id_vocabulary)
            else:
                _id_label = object_max_id_labels[_].item()
                _conf = object_max_confs[_].item()
                if _conf < self.id_thresh or _conf < id_max_confs[_id_label]:  # low conf or not the max conf -> newborn
                    id_labels.append(self.num_id_vocabulary)
                elif _id_label in id_labels:
                    id_labels.append(self.num_id_vocabulary)
                else:                                                          # normal case
                    id_labels.append(_id_label)

        return id_labels

    def _id_max_assignment(self, id_scores: torch.Tensor):
        id_labels = [self.num_id_vocabulary] * len(id_scores)  # final ID labels
        trajectory_id_labels_set = set(self.trajectory_id_labels[0].tolist())   # all tracked ID labels

        id_max_confs, id_max_obj_idxs = torch.max(id_scores, dim=0)
        # Get the max confs of each object:
        object_max_confs = dict()
        for conf, object_idx in zip(id_max_confs.tolist(), id_max_obj_idxs.tolist()):
            if object_idx not in object_max_confs:
                object_max_confs[object_idx] = conf
            else:
                if conf == object_max_confs[object_idx]:    # a very rare case
                    conf = conf - 0.0001
                object_max_confs[object_idx] = max(object_max_confs[object_idx], conf)

        # Assign ID labels:
        for _ in range(len(id_max_obj_idxs)):
            _obj_idx, _id_label, _conf = id_max_obj_idxs[_].item(), _, id_max_confs[_].item()
            if _conf < self.id_thresh or _conf < object_max_confs[_obj_idx]:
                pass
            elif _id_label not in trajectory_id_labels_set:
                pass
            else:
                id_labels[_obj_idx] = _id_label

        return id_labels

    def _ensure_initialized(self, id_labels: torch.Tensor):
        """确保轨迹数据结构已正确初始化"""
        if not self._is_initialized or self.trajectory_id_labels.shape[0] == 0:
            self._init_trajectories(id_labels)
            self._is_initialized = True

    def _init_trajectories(self, id_labels: torch.Tensor):
        """初始化轨迹数据结构"""
        if len(id_labels) == 0:
            return

        _N = len(id_labels)
        device = distributed_device()

        # 初始化轨迹数据结构
        self.trajectory_features = torch.zeros((1, _N, 256), dtype=self.dtype, device=device)
        self.trajectory_boxes = torch.zeros((1, _N, 4), dtype=self.dtype, device=device)
        self.trajectory_id_labels = id_labels.unsqueeze(0)  # (1, N)
        self.trajectory_times = torch.zeros((1, _N), dtype=self.dtype, device=device)
        self.trajectory_masks = torch.ones((1, _N), dtype=torch.bool, device=device)
