from functools import partial
from typing import Optional

import torch
import torch.nn as nn

# TODO: conditional layer normalization
class ConditionalLayerNorm(nn.Module):
    """Layer normalization with optional conditioning.

    When ``conditions_dim`` is given, the affine parameters (``beta`` and
    ``gamma``) are shifted by linear projections of a condition tensor.
    The projection weights are zero-initialized, so at initialization the
    module behaves exactly like an unconditional LayerNorm and the
    conditioning influence is learned from scratch.

    Args:
        inputs_dim: Size of the normalized (last) dimension. Required
            whenever ``center`` and/or ``scale`` is enabled (the learned
            ``beta``/``gamma`` parameters have this size).
        conditions_dim: Size of the condition vector. ``None`` means plain
            (unconditional) layer normalization.
        center: If True, learn an additive bias ``beta``.
        scale: If True, learn a multiplicative gain ``gamma``.
        epsilon: Numerical stabilizer added to the variance.
        hidden_units: Optional hidden projection size for the condition;
            only meaningful when ``conditions_dim`` is given.
        hidden_initializer: Initializer name for the hidden projection
            weight: ``"normal"``, ``"xavier"`` (the historical misspelling
            ``"xaiver"`` is still accepted), or ``"zeros"``.
    """

    def __init__(self, inputs_dim: Optional[int] = None,
                 conditions_dim: Optional[int] = None,
                 center: bool = True, scale: bool = True,
                 epsilon: float = 1e-12,
                 hidden_units: Optional[int] = None,
                 hidden_initializer: str = 'xaiver') -> None:
        # Initialize nn.Module first so parameter/module registration below
        # is guaranteed to work.
        super().__init__()

        # `inputs_dim` is only meaningful -- and then mandatory -- when at
        # least one affine parameter (`center`/`scale`) is in use.
        # (The original check compared the bool flags against None, which
        # could never hold with the `bool` defaults and rejected valid calls.)
        if center or scale:
            assert inputs_dim is not None, \
                "The parameter `inputs_dim` must be consistent with the parameters `center` and `scale`."

        # Only the *conditional* variant has hidden units.
        if hidden_units is not None:
            assert conditions_dim is not None, "Only conditional layer normalization has the parameter `hidden_units`."

        self._inputs_dim = inputs_dim
        self._conditions_dim = conditions_dim
        self._center = center
        self._scale = scale
        self._epsilon = epsilon
        self._hidden_units = hidden_units
        self._hidden_initializer = hidden_initializer

        # LayerNorm affine parameters.
        if self._center:  # additive bias
            self.beta = nn.Parameter(torch.zeros(self._inputs_dim))
        if self._scale:  # multiplicative gain
            self.gamma = nn.Parameter(torch.ones(self._inputs_dim))

        # A non-None `conditions_dim` makes this a *conditional* layer norm.
        if self._conditions_dim is not None:
            proj_in_dim = self._conditions_dim  # default: no hidden layer

            if self._hidden_units is not None:
                self.hidden_layer = nn.Linear(self._conditions_dim, self._hidden_units, bias=False)
                # BUG FIX: honor the user-supplied `hidden_initializer`
                # instead of the previously hard-coded "xaiver".
                self.initialize_weights(self.hidden_layer.weight, self._hidden_initializer)
                # With a hidden layer, the beta/gamma projections consume its output.
                proj_in_dim = self._hidden_units

            # Zero-init the projections so conditioning starts as a no-op.
            if self._center:
                self.proj_beta = nn.Linear(proj_in_dim, self._inputs_dim, bias=False)
                self.initialize_weights(self.proj_beta.weight, "zeros")
            if self._scale:
                self.proj_gamma = nn.Linear(proj_in_dim, self._inputs_dim, bias=False)
                self.initialize_weights(self.proj_gamma.weight, "zeros")

    def forward(self, inputs, conditions=None):
        """Normalize `inputs` over its last dimension, optionally shifting
        beta/gamma by projections of `conditions`.

        Args:
            inputs: Tensor whose last dimension has size `inputs_dim`.
            conditions: Optional condition tensor of shape
                `(batch, conditions_dim)` or `(batch, seqlen, conditions_dim)`;
                must be given iff the module was built with `conditions_dim`.

        Returns:
            Tensor with the same shape as `inputs`.
        """
        # The call must match how the module was instantiated.
        assert (self._conditions_dim is None and conditions is None) or \
            (self._conditions_dim is not None and conditions is not None),\
                "Module parameter `conditions_dim`  must match the input `conditions`."
        assert (self._conditions_dim is None and conditions is None) or \
            len(conditions.size()) == 2 or len(conditions.size()) == 3, \
            "The input `conditions` must be the shape of `(batch, conditions_dim)`\
                or `(batch, seqlen, conditions_dim)`."

        if conditions is not None:  # conditional layer normalization
            if self._hidden_units is not None:
                conditions = self.hidden_layer(conditions)
            # Broadcast: insert singleton dims after batch until ranks match.
            for _ in range(inputs.dim() - conditions.dim()):
                conditions = conditions.unsqueeze(1)

            beta = self.beta + self.proj_beta(conditions) if self._center else None
            gamma = self.gamma + self.proj_gamma(conditions) if self._scale else None
        else:  # plain layer normalization
            beta = self.beta if self._center else None
            gamma = self.gamma if self._scale else None

        # Normalize over the last dimension. (Generalized from the previous
        # hard-coded `dim=2`, so rank-2 inputs `(batch, dim)` work as well;
        # identical results for the rank-3 case.)
        mean = inputs.mean(dim=-1, keepdim=True)
        diff = inputs - mean
        variance = diff.pow(2).mean(dim=-1, keepdim=True)  # biased variance
        std = torch.sqrt(variance + self._epsilon)

        outputs = diff / std

        if self._scale:
            outputs = outputs * gamma
        if self._center:
            outputs = outputs + beta

        return outputs

    @staticmethod
    def layer_norm(inputs, gamma, beta, epsilon=1e-12):
        """Functional layer norm over the last dimension of `inputs`."""
        mean = inputs.mean(dim=-1, keepdim=True)
        diff = inputs - mean
        variance = diff.pow(2).mean(dim=-1, keepdim=True)
        std = torch.sqrt(variance + epsilon)
        return gamma * diff / std + beta

    @staticmethod
    def initialize_weights(weights, initializer):
        """Initialize `weights` in place.

        `initializer` is either a callable `f(tensor)` or one of the names
        "normal", "xavier"/"xaiver" (Xavier uniform), "zeros".
        """
        if isinstance(initializer, str):
            str2func = {
                "normal": nn.init.normal_,
                "xavier": nn.init.xavier_uniform_,
                "xaiver": nn.init.xavier_uniform_,  # kept for backward compat
                "zeros": partial(nn.init.constant_, val=0)
            }
            initializer = str2func[initializer]

        return initializer(weights)

# TODO: conditional batch normalization