import torch
import torch.nn as nn


class BN1d(nn.Module):
    """
    Minimal BatchNorm1d supporting inputs of shape (B, C) or (B, C, L).

    - Training: normalizes with the current batch's (biased) mean/variance and
      updates ``running_mean`` / ``running_var``.
    - Eval: normalizes with the running statistics, matching PyTorch's
      ``nn.BatchNorm1d``; if ``track_running_stats=False`` it falls back to the
      current batch's statistics (as PyTorch does).

    Note: like PyTorch, normalization uses the *biased* batch variance, while
    the running-variance estimate is updated with the *unbiased* (Bessel-
    corrected) batch variance.
    """

    def __init__(self, num_features: int, eps: float = 1e-5, momentum: float = 0.1,
                 affine: bool = True, track_running_stats: bool = True):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats

        if self.affine:
            # Learnable per-channel scale (gamma) and shift (beta).
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)

        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            # Registering None keeps state_dict layout consistent with PyTorch.
            self.register_buffer('running_mean', None)
            self.register_buffer('running_var', None)
            self.register_buffer('num_batches_tracked', None)

    def _expand_channel(self, v, x: torch.Tensor):
        """Reshape a per-channel (C,) tensor so it broadcasts against x, e.g. to (1, C, 1)."""
        if v is None:
            return None
        shape = [1] * x.dim()
        shape[1] = self.num_features
        return v.view(*shape)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        assert x.dim() in (2, 3), f"BN1d 仅支持 (B,C) 或 (B,C,L)，收到形状 {tuple(x.shape)}"
        assert x.size(1) == self.num_features, "channel 维度需等于 num_features"

        # Statistics are reduced over every dimension except the channel dim.
        reduce_dims = [i for i in range(x.dim()) if i != 1]

        if self.training:
            # Use the current batch's statistics for normalization.
            batch_mean = x.mean(dim=reduce_dims)                  # (C,)
            batch_var = x.var(dim=reduce_dims, unbiased=False)    # (C,) biased

            if self.track_running_stats:
                with torch.no_grad():
                    # PyTorch updates running_var with the unbiased batch
                    # variance (Bessel correction n/(n-1)); guard n == 1.
                    n = x.numel() / x.size(1)
                    unbiased_var = batch_var * (n / (n - 1)) if n > 1 else batch_var
                    # In-place updates preserve the registered buffers
                    # (device/dtype) instead of rebinding new tensors.
                    self.running_mean.mul_(1 - self.momentum).add_(batch_mean, alpha=self.momentum)
                    self.running_var.mul_(1 - self.momentum).add_(unbiased_var, alpha=self.momentum)
                    self.num_batches_tracked += 1

            mean = batch_mean
            var = batch_var
        else:
            # Inference: prefer the tracked running statistics.
            if self.track_running_stats and self.running_mean is not None:
                mean = self.running_mean
                var = self.running_var
            else:
                # No tracked stats: degrade to per-batch statistics
                # (not recommended for inference).
                mean = x.mean(dim=reduce_dims)
                var = x.var(dim=reduce_dims, unbiased=False)

        mean_e = self._expand_channel(mean, x)
        var_e = self._expand_channel(var, x)
        y = (x - mean_e) / torch.sqrt(var_e + self.eps)

        if self.affine:
            w = self._expand_channel(self.weight, x)
            b = self._expand_channel(self.bias, x)
            y = w * y + b

        return y


class BN2d(nn.Module):
    """
    Minimal BatchNorm2d supporting inputs of shape (B, C, H, W).

    Same logic as BN1d, restricted to 4D tensors. As in PyTorch's
    ``nn.BatchNorm2d``: normalization uses the *biased* batch variance, while
    the running-variance estimate is updated with the *unbiased* (Bessel-
    corrected) batch variance.
    """

    def __init__(self, num_features: int, eps: float = 1e-5, momentum: float = 0.1,
                 affine: bool = True, track_running_stats: bool = True):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats

        if self.affine:
            # Learnable per-channel scale (gamma) and shift (beta).
            self.weight = nn.Parameter(torch.ones(num_features))
            self.bias = nn.Parameter(torch.zeros(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)

        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            # Registering None keeps state_dict layout consistent with PyTorch.
            self.register_buffer('running_mean', None)
            self.register_buffer('running_var', None)
            self.register_buffer('num_batches_tracked', None)

    def _expand_channel(self, v, x: torch.Tensor):
        """Reshape a per-channel (C,) tensor so it broadcasts against x, i.e. to (1, C, 1, 1)."""
        if v is None:
            return None
        shape = [1] * x.dim()
        shape[1] = self.num_features
        return v.view(*shape)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        assert x.dim() == 4, f"BN2d 仅支持 (B,C,H,W)，收到形状 {tuple(x.shape)}"
        assert x.size(1) == self.num_features

        # Reduce over batch, height, width; statistics are per channel.
        reduce_dims = [0, 2, 3]

        if self.training:
            batch_mean = x.mean(dim=reduce_dims)                  # (C,)
            batch_var = x.var(dim=reduce_dims, unbiased=False)    # (C,) biased

            if self.track_running_stats:
                with torch.no_grad():
                    # PyTorch updates running_var with the unbiased batch
                    # variance (Bessel correction n/(n-1)); guard n == 1.
                    n = x.numel() / x.size(1)
                    unbiased_var = batch_var * (n / (n - 1)) if n > 1 else batch_var
                    # In-place updates preserve the registered buffers
                    # (device/dtype) instead of rebinding new tensors.
                    self.running_mean.mul_(1 - self.momentum).add_(batch_mean, alpha=self.momentum)
                    self.running_var.mul_(1 - self.momentum).add_(unbiased_var, alpha=self.momentum)
                    self.num_batches_tracked += 1

            mean = batch_mean
            var = batch_var
        else:
            # Inference: prefer the tracked running statistics.
            if self.track_running_stats and self.running_mean is not None:
                mean = self.running_mean
                var = self.running_var
            else:
                # No tracked stats: degrade to per-batch statistics.
                mean = x.mean(dim=reduce_dims)
                var = x.var(dim=reduce_dims, unbiased=False)

        mean_e = self._expand_channel(mean, x)
        var_e = self._expand_channel(var, x)
        y = (x - mean_e) / torch.sqrt(var_e + self.eps)

        if self.affine:
            w = self._expand_channel(self.weight, x)
            b = self._expand_channel(self.bias, x)
            y = w * y + b

        return y


if __name__ == '__main__':
    # Smoke-test the three supported input layouts.
    batch = 64

    print('--- 2D input: (B, C) ---')
    bn_fc = BN1d(400)
    out_fc = bn_fc(torch.randn(batch, 400))
    print('y shape:', out_fc.shape)
    print('gamma/beta:', bn_fc.weight.shape, bn_fc.bias.shape)

    print('\n--- 3D input: (B, C, L) ---')
    bn_seq = BN1d(32)
    out_seq = bn_seq(torch.randn(batch, 32, 32))
    print('y shape:', out_seq.shape)
    print('gamma/beta:', bn_seq.weight.shape, bn_seq.bias.shape)

    print('\n--- 4D input: (B, C, H, W) ---')
    bn_img = BN2d(3)
    out_img = bn_img(torch.randn(batch, 3, 32, 32))
    print('y shape:', out_img.shape)
    print('gamma/beta:', bn_img.weight.shape, bn_img.bias.shape)
    print('running_mean/var:', bn_img.running_mean.shape, bn_img.running_var.shape)
