import math
import os
from typing import Optional

import torch

# Lightweight memory manager: wraps DACC (density gating), a quarantine buffer,
# and HAM (track health + write recycling).
# Note: for Python < 3.10 compatibility, avoid "|" union type annotations.


class MemoryManager:
    """Lightweight memory manager for a tracking pipeline.

    Bundles three cooperating mechanisms:

    * **DACC** (density-gated adaptive capacity control): modulates the
      buffer threshold and caps per-track write budgets according to scene
      density (active-track count, box overlap, margin uncertainty).
    * **Quarantine buffer**: delays memory writes until an id has been
      buffered for a configurable number of consecutive frames.
    * **HAM** (health accounting): maintains per-id prototype statistics
      (mu / sigma2 / usage / margin) used to roll back low-health writes
      and to calibrate detection scores against memory.

    All tunables default from environment variables so behaviour can be
    changed without code edits; ``get_config`` / ``apply_config`` expose the
    same knobs programmatically.
    """

    def __init__(
        self,
        bbox_unnorm_tensor: torch.Tensor,
        *,
        use_dacc: Optional[bool] = None,
        use_score_calibration: Optional[bool] = None,
    ):
        """
        Args:
            bbox_unnorm_tensor: un-normalization tensor [W, H, W, H] on the
                target device (pixel width/height).
            use_dacc: explicit DACC switch; when None, falls back to the
                USE_DACC env var (default False).
            use_score_calibration: explicit score-calibration switch; when
                None, falls back to USE_SCORE_CALIBRATION (default False).
        """
        # bbox_unnorm_tensor: [W, H, W, H] on device
        self.bbox_unnorm = bbox_unnorm_tensor

        # DACC configs
        env_use_dacc = os.getenv("USE_DACC")
        if use_dacc is None:
            # The env flag only counts when it is exactly the string "True".
            self.use_dacc = env_use_dacc == "True" if env_use_dacc is not None else False
        else:
            self.use_dacc = use_dacc
        self.dacc_alpha = float(os.getenv("DACC_ALPHA", "0.4"))  # weight: active-count term
        self.dacc_beta = float(os.getenv("DACC_BETA", "0.4"))    # weight: overlap (IoU) term
        self.dacc_gamma = float(os.getenv("DACC_GAMMA", "0.2"))  # weight: margin-uncertainty term
        self.c_min = int(os.getenv("DACC_C_MIN", "8"))           # min per-track write budget
        self.c_max = int(os.getenv("DACC_C_MAX", "32"))          # max per-track write budget
        # scaling range for buffer threshold modulation
        self.dacc_scale_min = float(os.getenv("DACC_SCALE_MIN", "0.9"))
        self.dacc_scale_max = float(os.getenv("DACC_SCALE_MAX", "1.1"))

        # HAM configs
        self.ham_time_decay = float(os.getenv("HAM_TIME_DECAY", "0.98"))
        self.ham_alpha_a = float(os.getenv("HAM_ALPHA_A", "1.0"))
        self.ham_alpha_b = float(os.getenv("HAM_ALPHA_B", "1.0"))
        self.ham_alpha_c = float(os.getenv("HAM_ALPHA_C", "1.0"))
        self.ham_alpha_d = float(os.getenv("HAM_ALPHA_D", "1.0"))
        self.ham_beta_sigma = float(os.getenv("HAM_BETA_SIGMA", "0.1"))
        self.health_lambda1 = float(os.getenv("HEALTH_L1", "0.25"))
        self.health_lambda2 = float(os.getenv("HEALTH_L2", "0.25"))
        self.health_lambda3 = float(os.getenv("HEALTH_L3", "0.25"))
        self.health_lambda4 = float(os.getenv("HEALTH_L4", "0.25"))

        # States
        self.quarantine_buffer = {}  # {id_label: {count:int, last_t:int}}
        self.health_state = {}       # {id_label: {mu:Tensor, sigma2:float, usage:float, last_t:int, w:float, margin:float}}

        # Score calibration configs
        env_use_score_cal = os.getenv("USE_SCORE_CALIBRATION")
        if use_score_calibration is None:
            self.use_score_calibration = env_use_score_cal == "True" if env_use_score_cal is not None else False
        else:
            self.use_score_calibration = use_score_calibration
        self.score_alpha = float(os.getenv("SCORE_ALPHA", "0.7"))  # weight for detection score
        self.score_beta = float(os.getenv("SCORE_BETA", "0.3"))    # weight for tracking score
        self.similarity_threshold = float(os.getenv("SIMILARITY_THRESHOLD", "0.3"))  # threshold for considering as tracked object

    @torch.no_grad()
    def modulate_threshold(self, base_buf_th: float, boxes: torch.Tensor, features_list, trajectory_masks: torch.Tensor) -> float:
        """
        DACC: adjust the buffer threshold based on scene density (active
        count, pairwise IoU, margin uncertainty).

        Args:
            base_buf_th: base buffer threshold.
            boxes: normalized cx/cy/w/h boxes [N, 4]; None/empty is a no-op.
            features_list: per-box rows [[s_det, sim1, margin, area_ratio, aspect_ratio], ...]
            trajectory_masks: [T, N] bool history, True = inactive at step t.

        Returns:
            Modulated threshold clamped to [0, 1]. On any internal error the
            base threshold is returned unchanged (best-effort by design).
        """
        if (not self.use_dacc) or boxes is None or boxes.numel() == 0:
            return float(base_buf_th)
        try:
            # Active tracks from the latest mask row.
            if trajectory_masks is None or trajectory_masks.shape[0] == 0:
                active_mask = None
            else:
                active_mask = (~trajectory_masks[-1])
            n_active = int(active_mask.to(torch.int64).sum().item()) if active_mask is not None else 0
            # Saturating normalization: approaches 1.0 as active count grows.
            n_norm = 1.0 - math.exp(-n_active / 16.0)

            # Frame overlap (pairwise IoU over at most the first 7 boxes).
            iou_term = 0.0
            if boxes.shape[0] >= 2:
                sample_idx = list(range(min(boxes.shape[0], 7)))
                Wp = float(self.bbox_unnorm[0].item()); Hp = float(self.bbox_unnorm[1].item())
                xywh = boxes[sample_idx].clone()
                # Convert normalized cx/cy/w/h to pixel x1/y1/w/h.
                xywh[:, 0] = (xywh[:, 0] - 0.5 * xywh[:, 2]) * Wp
                xywh[:, 1] = (xywh[:, 1] - 0.5 * xywh[:, 3]) * Hp
                xywh[:, 2] = xywh[:, 2] * Wp
                xywh[:, 3] = xywh[:, 3] * Hp
                cnt = 0; iou_acc = 0.0
                for i in range(xywh.shape[0]):
                    for j in range(i+1, xywh.shape[0]):
                        ax1, ay1, aw, ah = xywh[i]
                        bx1, by1, bw, bh = xywh[j]
                        ax2, ay2 = ax1+aw, ay1+ah
                        bx2, by2 = bx1+bw, by1+bh
                        ix1, iy1 = max(float(ax1), float(bx1)), max(float(ay1), float(by1))
                        ix2, iy2 = min(float(ax2), float(bx2)), min(float(ay2), float(by2))
                        iw, ih = max(0.0, ix2-ix1), max(0.0, iy2-iy1)
                        inter = iw*ih
                        ua = float(aw*ah + bw*bh - inter + 1e-6)
                        iou_acc += inter/ua
                        cnt += 1
                if cnt > 0:
                    iou_term = max(0.0, min(1.0, iou_acc / cnt))  # clamp mean IoU to a sane range

            # Margin uncertainty: low average margin -> high uncertainty.
            margin_term = 0.0
            if features_list is not None and len(features_list) == boxes.shape[0]:
                margins = [max(0.0, float(row[2])) for row in features_list]
                if len(margins) > 0:
                    margin_term = 1.0 - (sum(margins) / len(margins))

            D_t = self.dacc_alpha * n_norm + self.dacc_beta * iou_term + self.dacc_gamma * margin_term
            D_t = max(0.0, min(1.0, D_t))
            # Lightweight, configurable modulation; clamp scales to avoid
            # overly conservative behaviour from bad env values.
            scale_min = max(0.5, min(1.5, float(self.dacc_scale_min)))
            scale_max = max(0.5, min(1.5, float(self.dacc_scale_max)))
            if scale_min > scale_max:
                scale_min, scale_max = scale_max, scale_min
            dacc_scale = scale_min + (scale_max - scale_min) * D_t
            eff = float(base_buf_th) * dacc_scale
            return max(0.0, min(1.0, eff))
        except Exception:
            # Deliberate best-effort fallback: never break the caller.
            return float(base_buf_th)

    @torch.no_grad()
    def apply_quarantine(self, mask_to_buffer: torch.Tensor, id_labels: torch.Tensor, timestep: int, buffer_promote_steps: int, num_objects: int, device) -> torch.Tensor:
        """Build a keep mask from ``mask_to_buffer``; an id is promoted
        (written) once it has been buffered on consecutive frames reaching
        the threshold.

        Args:
            mask_to_buffer: [N] bool, True = hold this index in quarantine.
            id_labels: [N] track id labels aligned with mask_to_buffer.
            timestep: current frame index (consecutiveness is judged by
                last_t == timestep - 1).
            buffer_promote_steps: consecutive-frame count needed to promote.
            num_objects: number of objects N (size of the returned mask).
            device: device for the returned mask.

        Returns:
            keep: [N] bool, True = allow the write this frame.
        """
        keep = torch.ones(num_objects, dtype=torch.bool, device=device)
        if mask_to_buffer is None or mask_to_buffer.numel() == 0:
            return keep
        buffer_idxs = mask_to_buffer.nonzero(as_tuple=False).view(-1)
        for idx in buffer_idxs.tolist():
            id_lbl = int(id_labels[idx].item())
            stat = self.quarantine_buffer.get(id_lbl, {"count": 0, "last_t": -1})
            if stat["last_t"] == timestep - 1:
                stat["count"] += 1
            else:
                # Gap in buffering: restart the consecutive count.
                stat["count"] = 1
            stat["last_t"] = timestep
            if stat["count"] >= int(buffer_promote_steps):
                keep[idx] = True
                stat["count"] = 0
            else:
                keep[idx] = False
            self.quarantine_buffer[id_lbl] = stat
        # Non-buffered indices break the consecutive streak; reset residue.
        if mask_to_buffer is not None and (~mask_to_buffer).any():
            for idx in (~mask_to_buffer).nonzero(as_tuple=False).view(-1).tolist():
                id_lbl = int(id_labels[idx].item())
                if id_lbl in self.quarantine_buffer:
                    self.quarantine_buffer[id_lbl]["count"] = 0
                    self.quarantine_buffer[id_lbl]["last_t"] = timestep

        # Drop stale buffer entries not touched for more than 10 frames.
        for id_lbl in list(self.quarantine_buffer.keys()):
            if self.quarantine_buffer[id_lbl]["last_t"] < timestep - 10:
                del self.quarantine_buffer[id_lbl]

        return keep

    @torch.no_grad()
    def update_health(self, current_id_labels: torch.Tensor, current_features: torch.Tensor, current_masks: torch.Tensor, timestep: int):
        """Update health statistics (mu/sigma2/usage/margin) for the tracks
        written in the current frame only (mask == False means written).

        Args:
            current_id_labels: [N] id labels for the current frame.
            current_features: [N, D] feature vectors for the current frame.
            current_masks: [N] bool, True = not written this frame.
            timestep: current frame index.
        """
        written_indices = (~current_masks).nonzero(as_tuple=False).view(-1)
        if written_indices.numel() == 0:
            return
        for j in written_indices.tolist():
            id_lbl = int(current_id_labels[j].item())
            feat = current_features[j]
            st = self.health_state.get(id_lbl, None)
            if st is None:
                # First observation of this id: initialize the prototype.
                self.health_state[id_lbl] = {
                    "mu": feat.detach().clone(),
                    "sigma2": 0.0,
                    "usage": 1.0,
                    "last_t": int(timestep),
                    "w": 1.0,
                    "margin": 0.0,
                }
            else:
                mu = st["mu"]
                # Cosine similarity between the prototype and the new feature.
                mu_n = mu / (mu.norm(p=2) + 1e-6)
                f_n = feat / (feat.norm(p=2) + 1e-6)
                sim1 = float((mu_n * f_n).sum().item())
                margin_proxy = max(0.0, sim1)
                # Adaptive EMA rate: high similarity/margin -> faster update,
                # high variance -> slower update.
                alpha = torch.sigmoid(
                    torch.tensor(
                        self.ham_alpha_a * 1.0 +
                        self.ham_alpha_b * sim1 +
                        self.ham_alpha_c * margin_proxy -
                        self.ham_alpha_d * math.sqrt(max(st["sigma2"], 0.0) + 1e-6)
                    )
                ).item()
                new_mu = (1.0 - alpha) * mu + alpha * feat
                diff = (feat - new_mu).pow(2).mean().item()
                sigma2 = (1.0 - self.ham_beta_sigma) * st["sigma2"] + self.ham_beta_sigma * diff
                sigma2 = max(0.0, float(sigma2))  # keep the variance estimate non-negative
                self.health_state[id_lbl] = {
                    "mu": new_mu.detach().clone(),
                    "sigma2": sigma2,
                    "usage": float(st["usage"] * self.ham_time_decay + 1.0),
                    "last_t": int(timestep),
                    "w": float(st["w"] * self.ham_time_decay + 1.0),
                    "margin": float(0.7 * st.get("margin", 0.0) + 0.3 * margin_proxy),
                }

    @torch.no_grad()
    def capacity_control(self, trajectory_masks: torch.Tensor, trajectory_id_labels: torch.Tensor, current_masks: torch.Tensor, current_id_labels: torch.Tensor, miss_tolerance: int):
        """Prune excess writes per DACC scene capacity by setting this frame's
        ``current_masks`` entry to True for over-budget, low-health ids.

        Args:
            trajectory_masks: [T, N] bool write-mask history (True = masked).
            trajectory_id_labels: [T, N] id-label history.
            current_masks: [N] bool for the current frame; mutated in place.
            current_id_labels: [N] id labels for the current frame.
            miss_tolerance: history window length used for the budget count.
        """
        if not self.use_dacc:
            return
        if trajectory_id_labels.shape[0] == 0 or trajectory_masks.shape[0] == 0:
            return
        if miss_tolerance <= 0:
            return
        try:
            # Shape-consistency guard between the two history tensors.
            if trajectory_id_labels.shape[0] != trajectory_masks.shape[0]:
                print(f"Warning: trajectory_id_labels.shape[0]={trajectory_id_labels.shape[0]} != trajectory_masks.shape[0]={trajectory_masks.shape[0]}")
                return
            last_ids = trajectory_id_labels[-1]
            ids_unique = torch.unique(last_ids)
            n_active = int((~trajectory_masks[-1]).to(torch.int64).sum().item())
            # Density -> per-track capacity C_t in [c_min, c_max].
            D_t = 1.0 - math.exp(-n_active / 16.0)
            C_t = int(self.c_min + (self.c_max - self.c_min) * max(0.0, min(1.0, D_t)))
            T_hist = min(int(miss_tolerance), int(trajectory_masks.shape[0]))

            debug_mode = os.getenv("MEMORY_DEBUG", "False") == "True"
            if debug_mode:
                print(f"DACC Debug: n_active={n_active}, D_t={D_t:.3f}, C_t={C_t}, T_hist={T_hist}")
            for id_lbl in ids_unique.tolist():
                j = (last_ids == id_lbl).nonzero(as_tuple=False)
                if j.numel() == 0:
                    continue
                j = int(j[0].item())
                # How many frames in the recent window actually wrote this id.
                active_cnt = int((~trajectory_masks[-T_hist:, j]).to(torch.int64).sum().item())
                if active_cnt > C_t:
                    st = self.health_state.get(id_lbl, None)
                    if st is not None:
                        # NOTE(review): recency is a constant placeholder and
                        # stability is unbounded (1/sqrt(sigma2)); H is not
                        # strictly normalized to [0, 1].
                        recency = 1.0
                        usage = min(1.0, st["usage"] / float(T_hist + 1))
                        stability = 1.0 / math.sqrt(st["sigma2"] + 1e-6)
                        margin = st.get("margin", 0.0)
                        H = self.health_lambda1*recency + self.health_lambda2*usage + self.health_lambda3*stability + self.health_lambda4*margin
                    else:
                        H = 0.5
                    # Low health: cancel this frame's write for the id (rollback).
                    if H < 0.5:
                        # Find the id in the current frame and mask it out.
                        match_idx = (current_id_labels == id_lbl).nonzero(as_tuple=False)
                        if match_idx.numel() > 0:
                            current_masks[int(match_idx[0].item())] = True
        except Exception:
            # Capacity control is advisory; never break the caller.
            pass

    def get_config(self) -> dict:
        """Return a snapshot of all tunables as a plain dict."""
        return {
            "use_dacc": self.use_dacc,
            "dacc_alpha": self.dacc_alpha,
            "dacc_beta": self.dacc_beta,
            "dacc_gamma": self.dacc_gamma,
            "c_min": self.c_min,
            "c_max": self.c_max,
            "dacc_scale_min": self.dacc_scale_min,
            "dacc_scale_max": self.dacc_scale_max,
            "ham_time_decay": self.ham_time_decay,
            "ham_alpha_a": self.ham_alpha_a,
            "ham_alpha_b": self.ham_alpha_b,
            "ham_alpha_c": self.ham_alpha_c,
            "ham_alpha_d": self.ham_alpha_d,
            "ham_beta_sigma": self.ham_beta_sigma,
            "health_lambda1": self.health_lambda1,
            "health_lambda2": self.health_lambda2,
            "health_lambda3": self.health_lambda3,
            "health_lambda4": self.health_lambda4,
            "use_score_calibration": self.use_score_calibration,
            "score_alpha": self.score_alpha,
            "score_beta": self.score_beta,
            "similarity_threshold": self.similarity_threshold,
        }

    def apply_config(self, config: dict):
        """Apply tunables from ``config``; missing keys keep current values."""
        self.use_dacc = bool(config.get("use_dacc", self.use_dacc))
        self.dacc_alpha = float(config.get("dacc_alpha", self.dacc_alpha))
        self.dacc_beta = float(config.get("dacc_beta", self.dacc_beta))
        self.dacc_gamma = float(config.get("dacc_gamma", self.dacc_gamma))
        self.c_min = int(config.get("c_min", self.c_min))
        self.c_max = int(config.get("c_max", self.c_max))
        self.dacc_scale_min = float(config.get("dacc_scale_min", self.dacc_scale_min))
        self.dacc_scale_max = float(config.get("dacc_scale_max", self.dacc_scale_max))
        self.ham_time_decay = float(config.get("ham_time_decay", self.ham_time_decay))
        self.ham_alpha_a = float(config.get("ham_alpha_a", self.ham_alpha_a))
        self.ham_alpha_b = float(config.get("ham_alpha_b", self.ham_alpha_b))
        self.ham_alpha_c = float(config.get("ham_alpha_c", self.ham_alpha_c))
        self.ham_alpha_d = float(config.get("ham_alpha_d", self.ham_alpha_d))
        self.ham_beta_sigma = float(config.get("ham_beta_sigma", self.ham_beta_sigma))
        self.health_lambda1 = float(config.get("health_lambda1", self.health_lambda1))
        self.health_lambda2 = float(config.get("health_lambda2", self.health_lambda2))
        self.health_lambda3 = float(config.get("health_lambda3", self.health_lambda3))
        self.health_lambda4 = float(config.get("health_lambda4", self.health_lambda4))
        self.use_score_calibration = bool(config.get("use_score_calibration", self.use_score_calibration))
        self.score_alpha = float(config.get("score_alpha", self.score_alpha))
        self.score_beta = float(config.get("score_beta", self.score_beta))
        self.similarity_threshold = float(config.get("similarity_threshold", self.similarity_threshold))

    @torch.no_grad()
    def _track_scores_impl(self, detection_features: torch.Tensor) -> torch.Tensor:
        """Core track-score computation shared by the debug and production
        paths of ``compute_track_score``.

        For each detection feature it blends:
          * max cosine similarity against all stored HAM prototypes,
          * feature discriminability (raw L2 norm of the feature),
          * average temporal stability of the prototypes (usage / variance),
        then min-max normalizes the result to [0, 1].
        """
        track_scores = torch.zeros(detection_features.shape[0], dtype=torch.float32, device=detection_features.device)

        for i, det_feat in enumerate(detection_features):
            # Normalize the detection feature for cosine similarity.
            det_feat_norm = det_feat / (det_feat.norm(p=2) + 1e-6)

            # Feature discriminability: larger norm is treated as more
            # discriminative (raw, unbounded term).
            feature_discriminability = det_feat.norm(p=2).item()

            # Similarity and stability against all stored prototypes.
            max_similarity = 0.0
            avg_temporal_stability = 0.0
            valid_prototypes = 0

            for id_label, health_info in self.health_state.items():
                proto_feat = health_info["mu"]
                proto_feat_norm = proto_feat / (proto_feat.norm(p=2) + 1e-6)
                similarity = (det_feat_norm * proto_feat_norm).sum().item()

                max_similarity = max(max_similarity, similarity)

                # Temporal stability: high usage with low variance is stable.
                usage = health_info.get("usage", 0.0)
                sigma2 = health_info.get("sigma2", 1.0)
                temporal_stability = usage * (1.0 / (sigma2 + 1e-6))
                avg_temporal_stability += temporal_stability
                valid_prototypes += 1

            if valid_prototypes > 0:
                avg_temporal_stability /= valid_prototypes

            # Fixed blend of the three components.
            similarity_weight = 0.5
            discriminability_weight = 0.3
            stability_weight = 0.2

            track_scores[i] = (
                similarity_weight * max_similarity +
                discriminability_weight * feature_discriminability +
                stability_weight * avg_temporal_stability
            )

        # Min-max normalize to [0, 1], guarding against division by zero.
        track_max = track_scores.max()
        track_min = track_scores.min()
        if track_max > track_min and track_max > 0:
            track_scores = (track_scores - track_min) / (track_max - track_min)
        elif track_max == track_min and track_max > 0:
            track_scores = torch.ones_like(track_scores) * 0.5  # all equal: use 0.5

        return track_scores

    @torch.no_grad()
    def compute_track_score(self, detection_features: torch.Tensor, detection_scores: torch.Tensor) -> torch.Tensor:
        """
        Compute a tracking-friendliness score for each detection box.

        Args:
            detection_features: detection features [N, D]
            detection_scores: detection confidence scores [N] (currently
                unused; kept for interface stability)

        Returns:
            track_scores: tracking-friendliness scores [N]
        """
        if not self.use_score_calibration or detection_features.shape[0] == 0:
            return torch.ones(detection_features.shape[0], dtype=torch.float32, device=detection_features.device)

        # Debug mode (MEMORY_DEBUG=True) lets exceptions propagate for easier
        # diagnosis; production mode falls back to neutral scores on failure.
        debug_mode = os.getenv("MEMORY_DEBUG", "False") == "True"
        if debug_mode:
            return self._track_scores_impl(detection_features)
        try:
            return self._track_scores_impl(detection_features)
        except Exception as e:
            # Computation failed: return default scores and log the error.
            print(f"Warning: compute_track_score failed: {e}")
            return torch.ones(detection_features.shape[0], dtype=torch.float32, device=detection_features.device)

    @torch.no_grad()
    def calibrate_detection_scores(self, detection_scores: torch.Tensor, detection_features: torch.Tensor) -> torch.Tensor:
        """
        Memory-based calibration of detection-box scores.

        Args:
            detection_scores: raw detection confidence scores [N]
            detection_features: detection features [N, D]

        Returns:
            calibrated_scores: final calibrated scores [N]
        """
        if not self.use_score_calibration:
            return detection_scores

        # Input shape consistency check.
        assert detection_scores.shape[0] == detection_features.shape[0], \
            "detection_scores and detection_features must have the same length"

        try:
            # Tracking-friendliness scores from memory.
            track_scores = self.compute_track_score(detection_features, detection_scores)

            # Weighted blend of detection and tracking scores.
            calibrated_scores = (
                self.score_alpha * detection_scores +
                self.score_beta * track_scores
            )

            return calibrated_scores

        except Exception:
            # If calibration fails, fall back to the raw scores.
            return detection_scores

