from collections import deque
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from uda_method.im import im
    

class ChannelMemory(nn.Module):
    """Running channel-wise feature statistics (mean/std) per network layer.

    Keeps an exponential moving average (EMA) of target-domain channel
    statistics for each layer, and re-styles features with those statistics
    (AdaIN-like: instance-normalize, then scale/shift with the memory).

    NOTE(review): the statistics are created with ``.cuda()`` and are plain
    tensors (not registered buffers), so they are excluded from
    ``state_dict`` and require a CUDA device — confirm this is intended.
    """

    def __init__(self, layer=(96, 192, 384, 768), momentum=0.8):
        """
        Args:
            layer: per-layer channel counts.
            momentum: base EMA momentum; deeper layers use a larger
                (slower-updating) momentum via ``momentum ** (1/(i+1))``.
        """
        super().__init__()
        # Per-layer EMA momentum: shallow layers update faster.
        # For momentum=0.8 this is roughly [0.8, 0.89, 0.93, 0.95].
        self.layer_momentum = [
            momentum ** (1 / (i + 1)) for i in range(len(layer))
        ]
        self.means = [torch.zeros(c).cuda() for c in layer]
        self.stds = [torch.ones(c).cuda() for c in layer]
        self.layer_len = len(layer)

    def update(self, layer_idx, feat_tgt):
        """EMA-update the statistics of layer ``layer_idx`` from a BCHW batch.

        Args:
            layer_idx: index of the layer whose memory is updated.
            feat_tgt: target-domain features, shape [B, C, H, W].
        """
        # Channel statistics over batch and spatial dims; detach so the
        # memory never participates in backprop.
        mean_t = feat_tgt.mean(dim=(0, 2, 3)).detach()
        std_t = feat_tgt.std(dim=(0, 2, 3)).detach()

        # Exponential moving average with the layer-specific momentum.
        m = self.layer_momentum[layer_idx]
        self.means[layer_idx] = m * self.means[layer_idx] + (1 - m) * mean_t
        self.stds[layer_idx] = m * self.stds[layer_idx] + (1 - m) * std_t

    def adapt_style(self, idx, feat=None):
        """Re-style ``feat`` (BCHW) with the stored statistics of layer ``idx``.

        Args:
            idx: layer index into the memory.
            feat: feature tensor to re-style, shape [B, C, H, W]; required.

        Returns:
            Re-styled tensor, same shape as ``feat``.

        Raises:
            ValueError: if ``feat`` is None.  (Bug fix: the original fell
                through the ``if`` and crashed with a NameError on the
                undefined locals ``feat_norm``/``C``.)
        """
        if feat is None:
            raise ValueError("adapt_style requires a feature tensor (got None)")
        mean_t, std_t = self.means[idx], self.stds[idx]
        B, C, H, W = feat.shape
        # Instance-normalize per sample, then apply the memorized style.
        feat_norm = (feat - feat.mean(dim=(2, 3), keepdim=True)) / (
            feat.std(dim=(2, 3), keepdim=True) + 1e-6
        )
        return feat_norm * std_t.view(1, C, 1, 1) + mean_t.view(1, C, 1, 1)

 
class StyleAdaptiveMemory(nn.Module):
    """Class-aware memory of target-domain feature statistics.

    Maintains per-class EMA channel means/stds (computed from spatially
    pooled features, gated by pseudo-label confidence) plus a global
    fallback, and uses them to re-style source features (AdaIN-like
    transfer with confidence-driven perturbation and cross-class mixing).

    NOTE(review): the statistics are plain ``.cuda()`` tensors, not
    registered buffers, so they are excluded from ``state_dict`` and
    require a CUDA device — confirm this is intended.
    """
    def __init__(self, 
                 channels=96,
                 n_classes=31,
                 momentum=0.9,
                 confidence_thresh=0.8):
        """
        Args:
            channels: channel dimension of the feature layer.
            n_classes: number of per-class memory slots.
            momentum: base EMA momentum for the statistics.
            confidence_thresh: minimum softmax probability for a
                pseudo-label to contribute to the class statistics.
        """
        super().__init__()
        
        # EMA hyper-parameters and sizes.
        self.momentum = momentum
        self.confidence_thresh = confidence_thresh
        self.n_classes = n_classes
        self.channels = channels
        
        # Class-aware statistics storage (one mean/std/count per class).
        self.class_means = [torch.zeros(channels).cuda() for _ in range(n_classes)]
        self.class_stds = [torch.ones(channels).cuda() for _ in range(n_classes)]
        self.class_counts = [torch.zeros(1).cuda() for _ in range(n_classes)]
        
        # Global statistics used as a fallback for rarely-seen classes.
        self.global_mean = torch.zeros(channels).cuda()
        self.global_std = torch.ones(channels).cuda()


    def update(self, feat_tgt, logits_tgt):
        """Update global and per-class statistics from a target batch.

        Args:
            feat_tgt: target features, shape [B, C, H, W].
            logits_tgt: classifier logits for the same batch, [B, n_classes].

        Returns:
            The result of ``im(logits_tgt)`` — project-local helper from
            ``uda_method.im`` (presumably an information-maximization
            loss; verify against that module).
        """
        B, C, H, W = feat_tgt.shape
        # Spatially average the features and detach from the graph.
        feat_repr = feat_tgt.mean(dim=(2,3)).detach()  # [B,C]
        
        # High-confidence pseudo-labels.
        probs = F.softmax(logits_tgt, dim=1)
        max_probs, pseudo_labels = torch.max(probs, dim=1)
        # Cached for adapt_style's confidence-driven perturbation.
        self.last_confidence = max_probs.detach()  # [B]
        self.last_pseudo_labels = pseudo_labels.detach()  # [B]
        conf_mask = (max_probs > self.confidence_thresh)
        
        # Update the global statistics (guarding degenerate batch sizes).
        with torch.no_grad():
            if len(feat_repr) > 1:
                global_mean = feat_repr.mean(dim=0)
                global_std = feat_repr.std(dim=0, unbiased=False)  # biased estimate
            else:
                global_mean = feat_repr[0]  # single-sample batch
                global_std = torch.ones_like(global_mean)
            
            # Exponential moving average.
            self.global_mean = self.momentum * self.global_mean + (1-self.momentum)*global_mean
            self.global_std = self.momentum * self.global_std + (1-self.momentum)*global_std
        
        # Update per-class statistics for the classes present in the batch.
        unique_labels = torch.unique(pseudo_labels[conf_mask])
        for cls in unique_labels:
            cls = int(cls.item()) 
            cls_mask = (pseudo_labels[conf_mask] == cls)
            
            if cls_mask.sum() > 0:  # at least one sample
                cls_feat = feat_repr[conf_mask][cls_mask]
                
                # Dynamic momentum: the more samples, the smaller the momentum.
                cls_momentum = self.momentum * (1 - 0.1*torch.log1p(cls_mask.sum()/100))
                
                # Mean update (always safe).
                self.class_means[cls] = cls_momentum*self.class_means[cls] + (1-cls_momentum)*cls_feat.mean(0)
                
                # Std update needs at least two samples to be meaningful.
                if len(cls_feat) >= 2:
                    cls_std = cls_feat.std(dim=0, unbiased=False)  # biased estimate
                else:
                    cls_std = self.global_std.clone()  # fall back to the global stats
                
                # Clamp to keep the variance in a safe range.
                cls_std = torch.clamp(cls_std, min=1e-4, max=10.0)
                self.class_stds[cls] = cls_momentum*self.class_stds[cls] + (1-cls_momentum)*cls_std
                
                # Running per-class sample count.
                self.class_counts[cls] += cls_mask.sum()
        return im(logits_tgt)
        

    def adapt_style(self, feat_src, labels_src, alpha=0.1):
        """Re-style source features with the class-aware target statistics.

        Instance-normalizes each source sample, then applies the memorized
        per-class (or global-fallback) mean/std with confidence-driven
        random perturbation and random cross-class style mixing.

        Args:
            feat_src: source features [B,C,H,W]
            labels_src: source labels [B]
            alpha: style-transfer strength (0-1); scales both the random
                perturbation and the cross-class mixing ratio

        Returns:
            Re-styled features, same shape as ``feat_src``.
        """
        
        B, C, H, W = feat_src.shape
         # Gather per-sample statistics from the class memory.
        means = torch.stack([self.class_means[int(cls)] for cls in labels_src])  # [B,C]
        stds = torch.stack([torch.clamp(self.class_stds[int(cls)], 0.1, 10) for cls in labels_src])

        # A class slot is trusted only after >20 confident samples.
        valid = torch.stack([self.class_counts[int(cls)] > 20 for cls in labels_src]) # [B]

        # Fall back to the global statistics for untrusted classes.
        final_means = torch.where(valid, means, self.global_mean.expand_as(means))
        final_stds = torch.where(valid, stds, self.global_std.expand_as(stds))
        # Confidence cached by the last update() call, if its length matches.
        # NOTE(review): this reuses target-batch confidences for source
        # samples whenever the batch sizes happen to match — confirm the
        # batches actually correspond.
        if hasattr(self, 'last_confidence') and len(self.last_confidence) == B:
            conf = self.last_confidence.view(B,1)
        else:
            conf = torch.full((B,1), 0.5, device=feat_src.device)
        # Lower confidence -> stronger perturbation.
        alpha_dynamic = alpha * (1.0 - conf)  # [B,1]

        # Random perturbation factors (normal distribution).
        epsilon_std = alpha_dynamic * torch.randn(B, C, device=feat_src.device)  # std perturbation
        epsilon_mean = alpha_dynamic * torch.randn(B, C, device=feat_src.device) # mean perturbation
        
        # Apply the perturbations to the target-domain statistics.
        perturbed_stds = final_stds * (1 + epsilon_std)
        perturbed_means = final_means * (1 + epsilon_mean)

        # Cross-class style mixing: interpolate with a random in-batch style.
        mix_idx = torch.randint(0, B, (B,), device=feat_src.device)
        mixed_means = final_means[mix_idx]
        mixed_stds = final_stds[mix_idx]
        lambda_mix = torch.rand(B, 1, device=feat_src.device) * alpha  # mixing ratio
        # Interpolated statistics.
        mix_means = (1 - lambda_mix) * perturbed_means + lambda_mix * mixed_means
        mix_stds = (1 - lambda_mix) * perturbed_stds + lambda_mix * mixed_stds


        # Instance-normalize, then apply the mixed style.
        feat_norm = (feat_src - feat_src.mean(dim=[2,3], keepdim=True)) / (feat_src.std(dim=(2,3), keepdim=True) + 1e-6)
        feat_src_ = feat_norm * mix_stds.view(B,C,1,1) + mix_means.view(B,C,1,1)

        return feat_src_
            

class StyleAdaptiveMemoryV1(nn.Module):
    """Multi-layer variant of StyleAdaptiveMemory.

    Holds one class-aware statistics memory per backbone layer, addressed
    by layer index, and re-styles source features with the selected
    layer's statistics.

    NOTE(review): the per-layer statistics are plain ``.cuda()`` tensors
    attached to bare ``nn.Module`` containers — they are not registered
    buffers, so they are excluded from ``state_dict``; confirm intended.
    """
    def __init__(self, 
                 layer_channels: list,  # e.g., [96, 192, 384, 768]
                 n_classes=31,
                 momentum=0.9,
                 confidence_thresh=0.8):
        """
        Args:
            layer_channels: channel count per layer, e.g. [96, 192, 384, 768].
            n_classes: number of per-class memory slots per layer.
            momentum: base EMA momentum for the statistics.
            confidence_thresh: minimum pseudo-label confidence for class updates.
        """
        super().__init__()
        self.momentum = momentum
        self.conf_thresh = confidence_thresh
        self.n_classes = n_classes

        # One memory container per layer, keyed by str(layer_idx).
        self.memories = nn.ModuleDict()
        for i, ch in enumerate(layer_channels):
            self.memories[str(i)] = self._init_layer_memory(ch)

    def _init_layer_memory(self, channels):
        """Create a bare module holding per-class and global stats for one layer."""
        mem = nn.Module()
        mem.channels = channels
        mem.class_means = [torch.zeros(channels).cuda() for _ in range(self.n_classes)]
        mem.class_stds = [torch.ones(channels).cuda() for _ in range(self.n_classes)]
        mem.class_counts = [torch.zeros(1).cuda() for _ in range(self.n_classes)]
        mem.global_mean = torch.zeros(channels).cuda()
        mem.global_std = torch.ones(channels).cuda()
        return mem

    def update(self, feat_tgt, logits_tgt, layer_idx: int):
        """EMA-update layer ``layer_idx`` global and per-class statistics.

        Args:
            feat_tgt: target features [B, C, H, W].
            logits_tgt: classifier logits [B, n_classes].
            layer_idx: index into the per-layer memories.
        """
        mem = self.memories[str(layer_idx)]
        B, C, H, W = feat_tgt.shape
        # Spatial average, detached from the graph.
        feat_repr = feat_tgt.mean(dim=(2,3)).detach()

        # High-confidence pseudo-labels.
        probs = F.softmax(logits_tgt, dim=1)
        max_probs, pseudo_labels = torch.max(probs, dim=1)
        conf_mask = max_probs > self.conf_thresh

        with torch.no_grad():
            # Guard single-sample batches (std over dim 0 would be invalid).
            global_mean = feat_repr.mean(dim=0) if len(feat_repr) > 1 else feat_repr[0]
            global_std = feat_repr.std(dim=0, unbiased=False) if len(feat_repr) > 1 else torch.ones_like(global_mean)
            mem.global_mean = self.momentum * mem.global_mean + (1 - self.momentum) * global_mean
            mem.global_std = self.momentum * mem.global_std + (1 - self.momentum) * global_std

        for cls in torch.unique(pseudo_labels[conf_mask]):
            cls = int(cls.item())
            cls_mask = pseudo_labels[conf_mask] == cls
            if cls_mask.sum() > 0:
                cls_feat = feat_repr[conf_mask][cls_mask]
                # Dynamic momentum: more samples -> smaller momentum.
                cls_momentum = self.momentum * (1 - 0.1 * torch.log1p(cls_mask.sum() / 100))
                mem.class_means[cls] = cls_momentum * mem.class_means[cls] + (1 - cls_momentum) * cls_feat.mean(0)
                # Fall back to the global std when fewer than two samples.
                cls_std = cls_feat.std(dim=0, unbiased=False) if len(cls_feat) >= 2 else mem.global_std.clone()
                cls_std = torch.clamp(cls_std, min=1e-4, max=10.0)
                mem.class_stds[cls] = cls_momentum * mem.class_stds[cls] + (1 - cls_momentum) * cls_std
                mem.class_counts[cls] += cls_mask.sum()

    def adapt_style(self, feat_src, labels_src, alpha=0.1, layer_idx=0):
        """Re-style source features with layer ``layer_idx`` statistics.

        Args:
            feat_src: source features [B, C, H, W].
            labels_src: source labels [B].
            alpha: strength of the random style perturbation.
            layer_idx: index into the per-layer memories.

        Returns:
            Re-styled features, same shape as ``feat_src``.
        """
        mem = self.memories[str(layer_idx)]
        B, C, H, W = feat_src.shape
        # Gather per-sample statistics from the class memory.
        means = torch.stack([mem.class_means[int(cls)] for cls in labels_src])
        stds = torch.stack([torch.clamp(mem.class_stds[int(cls)], 0.1, 10) for cls in labels_src])
        # Trust a class slot only after >20 confident samples; otherwise
        # fall back to the global statistics.
        valid = torch.stack([mem.class_counts[int(cls)] > 20 for cls in labels_src])
        final_means = torch.where(valid, means, mem.global_mean.expand_as(means))
        final_stds = torch.where(valid, stds, mem.global_std.expand_as(stds))

        # Style perturbation: add Gaussian noise to the mean/std.
        epsilon_std = alpha * torch.randn(B, C, device=feat_src.device)
        epsilon_mean = alpha * torch.randn(B, C, device=feat_src.device)
        perturbed_stds = final_stds * (1 + epsilon_std)
        perturbed_means = final_means * (1 + epsilon_mean)


         # Instance-normalize, then apply the perturbed style.
        feat_norm = (feat_src - feat_src.mean(dim=[2,3], keepdim=True)) / (feat_src.std(dim=(2,3), keepdim=True) + 1e-6)
        feat_src_ = feat_norm * perturbed_stds.view(B,C,1,1) + perturbed_means.view(B,C,1,1)

        return feat_src_

