# Copyright (c) OpenMMLab. All rights reserved.
from .image import ImageClassifier
from ..builder import CLASSIFIERS
import torch
from globals import sets

# ------------------------------
# 2. Statistics-alignment loss
# ------------------------------
def alignment_loss_stats(mu_ft, Sigma_ft, mu_pre, Sigma_pre, Q):
    """Statistics-alignment loss between current and pre-trained features.

    Combines two terms:
      - mean alignment:       || mu_ft - mu_pre @ Q ||_2^2 / d
      - covariance alignment: || Sigma_ft - Q^T Sigma_pre Q ||_F^2 / d^2

    The normalization uses the actual feature dimension ``d`` taken from
    ``mu_pre`` (previously hard-coded to 768, which silently mis-scaled
    the loss for any other feature width; identical result for d == 768).

    Args:
        mu_ft (Tensor): current feature mean, shape (d,).
        Sigma_ft (Tensor): current feature covariance, shape (d, d).
        mu_pre (Tensor): pre-trained feature mean, shape (d,).
        Sigma_pre (Tensor): pre-trained feature covariance, shape (d, d).
        Q (Tensor): alignment (orthogonal) matrix, shape (d, d).

    Returns:
        Tensor: scalar loss, mean term plus covariance term.
    """
    d = mu_pre.shape[-1]
    loss_mean = torch.norm(mu_ft - mu_pre @ Q, p=2) ** 2 / d
    loss_cov = torch.norm(Sigma_ft - Q.t() @ Sigma_pre @ Q, p='fro') ** 2 / (d * d)
    return loss_mean + loss_cov

@CLASSIFIERS.register_module()
class SimQ_ImageClassifier(ImageClassifier):
    """Image classifier with a statistics-alignment auxiliary loss.

    On top of the standard classification loss, this classifier keeps a
    memory bank of recent batch features and, once the bank is full,
    aligns the bank's mean/covariance with pre-computed statistics
    (``mu_pre``, ``Sigma_pre``) through the matrix ``Q``.

    Args:
        backbone, neck, head, pretrained, train_cfg, init_cfg: forwarded
            unchanged to ``ImageClassifier``.
        mu_pre (Tensor | None): pre-computed feature mean, shape (d,).
        Sigma_pre (Tensor | None): pre-computed feature covariance, (d, d).
        Q (Tensor | None): alignment matrix, shape (d, d). The alignment
            loss is only computed when ``Q``, ``mu_pre`` and ``Sigma_pre``
            are all provided.
        memory_bank_capacity (int): maximum number of *batches* (not
            individual samples) kept in the memory bank.
    """

    def __init__(self,
                 backbone,
                 neck=None,
                 head=None,
                 pretrained=None,
                 train_cfg=None,
                 init_cfg=None,
                 mu_pre=None,
                 Sigma_pre=None,
                 Q=None,
                 memory_bank_capacity=32):
        super().__init__(
            backbone,
            neck=neck,
            head=head,
            pretrained=pretrained,
            train_cfg=train_cfg,
            init_cfg=init_cfg)

        self.mu_pre = mu_pre
        self.Sigma_pre = Sigma_pre
        self.Q = Q

        # Memory bank of detached per-batch feature tensors, capped at
        # ``memory_bank_capacity`` batches.
        self.memory_bank = []
        self.memory_bank_capacity = memory_bank_capacity
        # Becomes True the first time the bank is full and alignment
        # statistics have been computed.
        self.align = False

    def update_memory_bank(self, feats):
        """Append the current batch's features to the memory bank.

        Features are detached so the bank does not keep the computation
        graph alive. The bank holds at most ``memory_bank_capacity``
        *batches*; the oldest batches are evicted first (FIFO).

        Args:
            feats (Tensor): features of the current batch, shape (B, d).
        """
        self.memory_bank.append(feats.detach())
        # Evict oldest entries until the batch count is within capacity.
        while len(self.memory_bank) > self.memory_bank_capacity:
            self.memory_bank.pop(0)

    def compute_memory_bank_stats(self):
        """Compute mean and covariance over all features in the bank.

        Returns ``(None, None)`` until the bank holds
        ``memory_bank_capacity`` batches. Side effect: sets
        ``self.align = True`` the first time statistics are computed.

        Returns:
            tuple: ``(mu_bank, Sigma_bank)`` with shapes (d,) and (d, d),
            or ``(None, None)`` while the bank is still filling.
        """
        if len(self.memory_bank) < self.memory_bank_capacity:
            return None, None
        self.align = True
        # Concatenate all banked batches into one (N, d) tensor.
        all_feats = torch.cat(self.memory_bank, dim=0)
        mu_bank = all_feats.mean(dim=0)
        # Unbiased sample covariance; degenerate single-sample case falls
        # back to a zero matrix to avoid division by zero.
        if all_feats.shape[0] > 1:
            centered = all_feats - mu_bank.unsqueeze(0)
            Sigma_bank = (centered.t() @ centered) / (all_feats.shape[0] - 1)
        else:
            Sigma_bank = torch.zeros(all_feats.shape[1], all_feats.shape[1],
                                     device=all_feats.device)
        return mu_bank, Sigma_bank

    def forward_train(self, img, gt_label=None, output_attentions=False, **kwargs):
        """Training forward: classification loss plus optional alignment loss.

        When ``gt_label`` is None the method returns features (and
        attention weights if requested) instead of losses.
        """
        if self.augments is not None:
            img, gt_label = self.augments(img, gt_label)

        if output_attentions:
            x, attention_weights = self.extract_feat(img, output_attentions)
        else:
            x = self.extract_feat(img, output_attentions)
        if gt_label is None:
            if output_attentions:
                return x[-1], attention_weights
            return x[-1]

        losses = dict()
        loss = self.head.forward_train(x, gt_label)

        # Compute the alignment loss only when the full set of alignment
        # inputs is available; previously the guard checked only ``Q``,
        # so a missing ``mu_pre``/``Sigma_pre`` crashed with a TypeError
        # inside ``alignment_loss_stats``.
        if (self.Q is not None and self.mu_pre is not None
                and self.Sigma_pre is not None):
            # x[0] is assumed to be the (B, d) feature used for alignment
            # — TODO confirm against extract_feat's output ordering.
            feats = x[0]
            self.update_memory_bank(feats)
            mu_bank, Sigma_bank = self.compute_memory_bank_stats()
            if mu_bank is not None and Sigma_bank is not None:
                loss['align_loss'] = alignment_loss_stats(
                    mu_bank, Sigma_bank, self.mu_pre, self.Sigma_pre, self.Q)

        losses.update(loss)
        return losses
