import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.nn import Parameter


class LayerNorm(nn.Module):
    """
    LayerNorm that supports inputs of size B, C, T
    """
    def __init__(
        self,
        num_channels,
        eps = 1e-5,
        affine = True,
        device = None,
        dtype = None,
    ):
        super().__init__()
        self.num_channels = num_channels
        self.eps = eps
        self.affine = affine

        if not self.affine:
            # no learnable transform; register placeholders so attribute
            # access stays uniform
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        else:
            factory_kwargs = {'device': device, 'dtype': dtype}
            # per-channel scale/shift, broadcastable over (B, C, T)
            self.weight = nn.Parameter(
                torch.ones([1, num_channels, 1], **factory_kwargs))
            self.bias = nn.Parameter(
                torch.zeros([1, num_channels, 1], **factory_kwargs))

    def forward(self, x):
        # expects a 3-D tensor (B, C, T) with C == num_channels
        assert x.dim() == 3
        assert x.shape[1] == self.num_channels

        # center and scale along the channel dimension (biased variance)
        centered = x - torch.mean(x, dim=1, keepdim=True)
        var = torch.mean(centered * centered, dim=1, keepdim=True)
        out = centered / torch.sqrt(var + self.eps)

        # optional learned affine transform
        if self.affine:
            out = out * self.weight + self.bias

        return out

class BatchMinMaxNorm(nn.Module):
    """Min-max normalization over the batch dimension with a learned affine.

    Training normalizes each feature by the batch min/max and updates running
    min/max statistics; evaluation normalizes with the running statistics.

    NOTE(review): here ``momentum`` weights the OLD running value
    (running = running * momentum + batch * (1 - momentum)), which is the
    inverse of ``torch.nn.BatchNorm1d``'s convention — preserved as-is.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        # Fix: initialize parameters to defined values instead of
        # `torch.Tensor(num_features)` (uninitialized memory before _reset).
        self.gamma = Parameter(torch.ones(num_features))
        self.beta = Parameter(torch.zeros(num_features))
        # Buffers move with the module (.to/.cuda) and are checkpointed.
        self.register_buffer("moving_min", torch.zeros(num_features))
        self.register_buffer("moving_max", torch.ones(num_features))
        self.register_buffer("eps", torch.tensor(eps))
        self.register_buffer("momentum", torch.tensor(momentum))
        self._reset()

    def _reset(self):
        # reset affine transform to identity (gamma=1, beta=0)
        with torch.no_grad():
            self.gamma.fill_(1)
            self.beta.fill_(0)

    def forward(self, x):
        """Normalize x of shape (B, num_features) to roughly [0, 1] per feature."""
        if self.training:
            min_value, _ = torch.min(x, dim=0)
            max_value, _ = torch.max(x, dim=0)
            # Fix: update the running stats outside autograd and in place.
            # The original reassigned the buffers with graph-attached tensors
            # (min/max of x requires grad when x does), retaining the whole
            # graph across steps and making the buffers require grad.
            with torch.no_grad():
                self.moving_min.mul_(self.momentum).add_(min_value * (1 - self.momentum))
                self.moving_max.mul_(self.momentum).add_(max_value * (1 - self.momentum))
        else:
            min_value = self.moving_min
            max_value = self.moving_max

        x_norm = (x - min_value) / (max_value - min_value + self.eps)
        return x_norm * self.gamma + self.beta

class BatchMeanNorm(nn.Module):
    """Mean-based normalization over the batch dimension with a learned affine.

    Training computes the per-feature batch mean and updates a running mean;
    evaluation uses the running mean.

    NOTE(review): ``momentum`` weights the OLD running value
    (running = running * momentum + batch * (1 - momentum)), the inverse of
    ``torch.nn.BatchNorm1d``'s convention — preserved as-is.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        # Fix: initialize parameters to defined values instead of
        # `torch.Tensor(num_features)` (uninitialized memory before _reset).
        self.gamma = Parameter(torch.ones(num_features))
        self.beta = Parameter(torch.zeros(num_features))
        self.register_buffer("moving_avg", torch.zeros(num_features))
        self.register_buffer("eps", torch.tensor(eps))
        self.register_buffer("momentum", torch.tensor(momentum))
        self._reset()

    def _reset(self):
        # reset affine transform to identity (gamma=1, beta=0)
        with torch.no_grad():
            self.gamma.fill_(1)
            self.beta.fill_(0)

    def forward(self, x):
        """Normalize x of shape (B, num_features) per feature."""
        if self.training:
            mean = x.mean(dim=0)
            # Fix: update the running mean outside autograd and in place.
            # The original reassigned the buffer with a graph-attached tensor,
            # retaining the graph across steps and making it require grad.
            with torch.no_grad():
                self.moving_avg.mul_(self.momentum).add_(mean * (1 - self.momentum))
        else:
            mean = self.moving_avg

        # NOTE(review): the denominator is (x + mean + eps), not a spread
        # estimate — this behaves like a relative/contrast normalization and
        # is unstable where x + mean ≈ 0. Preserved as-is; confirm intended.
        x_norm = (x - mean) / (x + mean + self.eps)
        return x_norm * self.gamma + self.beta

class BatchNorm(nn.Module):
    """Batch normalization over dim 0 with running mean/var and learned affine.

    Training normalizes with the batch statistics and updates the running
    mean/variance; evaluation normalizes with the running statistics.

    NOTE(review): ``momentum`` weights the OLD running value
    (running = running * momentum + batch * (1 - momentum)), the inverse of
    ``torch.nn.BatchNorm1d``'s convention — preserved as-is.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        # Fix: initialize parameters to defined values instead of
        # `torch.Tensor(num_features)` (uninitialized memory before _reset).
        self.gamma = Parameter(torch.ones(num_features))
        self.beta = Parameter(torch.zeros(num_features))
        self.register_buffer("moving_avg", torch.zeros(num_features))
        self.register_buffer("moving_var", torch.ones(num_features))
        self.register_buffer("eps", torch.tensor(eps))
        self.register_buffer("momentum", torch.tensor(momentum))
        self._reset()

    def _reset(self):
        # reset affine transform to identity (gamma=1, beta=0)
        with torch.no_grad():
            self.gamma.fill_(1)
            self.beta.fill_(0)

    def forward(self, x):
        """Normalize x of shape (B, num_features) per feature."""
        if self.training:
            mean = x.mean(dim=0)
            # NOTE(review): unbiased (N-1) variance; nn.BatchNorm1d normalizes
            # with the biased estimate — preserved as-is, confirm intended.
            var = x.var(dim=0)
            # Fix: update the running stats outside autograd and in place.
            # The original reassigned the buffers with graph-attached tensors,
            # retaining the graph across steps and making them require grad.
            with torch.no_grad():
                self.moving_avg.mul_(self.momentum).add_(mean * (1 - self.momentum))
                self.moving_var.mul_(self.momentum).add_(var * (1 - self.momentum))
        else:
            mean = self.moving_avg
            var = self.moving_var

        x_norm = (x - mean) / (torch.sqrt(var + self.eps))
        return x_norm * self.gamma + self.beta
    
    
class ChannelBlock(nn.Module):
    """CBAM-style channel block: shared MLP over avg- and max-pooled features,
    followed by a linear projection over the channel dimension."""

    def __init__(self, channel, ratio=4):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.max_pool = nn.AdaptiveMaxPool1d(1)

        # bottleneck MLP shared by both pooling branches
        self.shared_MLP = nn.Sequential(
            nn.Conv1d(channel, channel // ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv1d(channel // ratio, channel, 1, bias=False)
        )
        self.fc = nn.Linear(channel, channel)

    def forward(self, x):
        # x: (S, C) — S sample positions, C channels
        feat = x.permute([1, 0]).unsqueeze(dim=0)        # -> (1, C, S)
        branch_sum = (self.shared_MLP(self.avg_pool(feat))
                      + self.shared_MLP(self.max_pool(feat)))
        return self.fc(branch_sum.squeeze(dim=-1))       # -> (1, C)
    
if __name__ == '__main__':
    # Quick smoke check: print a torchinfo summary of ChannelBlock.
    from torchinfo import summary

    print("############### ChannelBlock #############")
    net = ChannelBlock(channel=512, ratio=4)
    summary(net, (100, 512), col_names=['input_size','output_size', 'num_params'])