
################################################################################
#
#! Copyright   yangxianpku@pku.edu.cn
# 
# @Author      yangxianpku
# @Description 本脚本实现DL模型中计算层的基类，并给出几种常见模型层的实现。深度学习模型分为2
#       个典型的阶段,即训练阶段和推理(预测)阶段。预测阶段只需要每个计算层进行前向计算, 而训练
#       时需要同时进行正向和反向计算. 因此在实现一个层的时候，我们需要同时实现对应的方法. 另
#       外，因为每一层的数学表达式不同，因此其梯度计算的方式也不一样，对应部分的数学知识假设大
#       家已经掌握。其实本身也不复杂，就是多元函数的Jacobian矩阵。
#             
# @Notice       为了完全抛开任何深度学习框架，我们甚至都不用他们来表示张量数据，因此这里我们
#       统一使用`numpy.ndarray`来表示模型的张量.
################################################################################

import numpy       as     np
import initializer as     init
from   maths       import *

class Layer(object):
    """Base class for a computational layer in a DL model.

    A layer owns trainable parameter slots (``params``), non-trainable
    slots (``nt_params``), their gradients and shapes, a training-mode
    flag, and a context dict used to stash forward-pass values that the
    backward pass needs.
    """

    def __init__(self) -> None:
        # One slot per declared parameter name, filled in lazily by
        # _init_params() once the shapes are known.
        self.params    = dict.fromkeys(self.param_names)
        self.nt_params = dict.fromkeys(self.nt_param_names)

        self.initializers = {}
        self.grads        = {}
        self.shapes       = {}

        self._is_train = True    # consulted by BatchNorm / Dropout
        self._is_init  = False

        self.ctx = {}

    def forward(self, inputs):
        raise NotImplementedError

    def backward(self, grads):
        raise NotImplementedError

    @property
    def is_train(self):
        return self._is_train

    @is_train.setter
    def is_train(self, is_train):
        self._is_train = is_train

    @property
    def is_init(self):
        return self._is_init

    @is_init.setter
    def is_init(self, is_init):
        self._is_init = is_init
        # Record the (now known) shape of every parameter.
        # NOTE(review): assumes every entry in self.params has already
        # been materialized — setting this on an uninitialized layer
        # with parameters would fail; confirm intended call order.
        for name in self.param_names:
            self.shapes[name] = self.params[name].shape

    @property
    def name(self):
        return self.__class__.__name__

    @property
    def param_names(self):
        # Subclasses override with the names of their trainable params.
        return ()

    @property
    def nt_param_names(self):
        # Subclasses override with the names of non-trainable params.
        return ()

    def _init_params(self):
        """Materialize every parameter from its initializer and shape."""
        for name in self.param_names:
            self.params[name] = self.initializers[name](self.shapes[name])
        self._is_init = True
        
class Dense(Layer):
    """Fully-connected layer: y = inputs @ weight + bias."""

    def __init__(self, num_out, w_init=None, b_init=None) -> None:
        """Fully-connected layer.

        Args:
            num_out: number of output units.
            w_init:  initializer for the weight matrix; defaults to
                ``init.XavierNormal()``.
            b_init:  initializer for the bias vector; defaults to
                ``init.Zeros()``.
        """
        super().__init__()
        # BUG FIX: create the default initializers lazily. Evaluating
        # them in the signature (the original code) builds them once at
        # class-definition time, so every Dense instance would share the
        # same initializer objects.
        if w_init is None:
            w_init = init.XavierNormal()
        if b_init is None:
            b_init = init.Zeros()
        self.initializers = {"weight": w_init, "bias": b_init}
        # The input dimension of `weight` stays None until the first
        # forward pass infers it from the data.
        self.shapes = {"weight": [None, num_out], "bias": [num_out]}

    def forward(self, inputs):
        if not self.is_init:
            # Infer the input feature dimension from the first batch.
            self.shapes["weight"][0] = inputs.shape[1]
            self._init_params()
        # Cache the input tensor; backward() needs it for the gradients.
        self.ctx = {"X": inputs}
        # `@` is matrix multiplication.
        return inputs @ self.params["weight"] + self.params["bias"]

    def backward(self, grad):
        """Backward pass of the fully-connected layer.

        Args:
            grad (np.ndarray): gradient of the loss w.r.t. this layer's
                output, same shape as the forward output.

        Returns:
            np.ndarray: gradient of the loss w.r.t. the forward input.
                By the chain rule, backward returns one gradient per
                forward input; forward takes a single tensor here.
        """
        self.grads["weight"] = self.ctx["X"].T @ grad   # dL/dW = X^T @ dL/dO
        self.grads["bias"]   = np.sum(grad, axis=0)     # dL/db = sum of dL/dO over the batch
        return grad @ self.params["weight"].T           # dL/dX = dL/dO @ W^T

    @property
    def param_names(self):
        return "weight", "bias"
  
  
class Conv2D(Layer):
    """2-D convolution layer (placeholder — not implemented yet)."""
    pass


class ConvTranpose2D(Conv2D):
    """Transposed 2-D convolution (placeholder — not implemented yet).

    NOTE(review): the class name looks like a typo for
    ``ConvTranspose2D``; renaming would break existing callers.
    """
    pass


class MaxPool2D(Layer):
    """2-D max pooling over NHWC tensors (forward and backward passes)."""

    def __init__(self, pool_size=(2, 2), stride=None, padding="VALID"):
        """2-D max-pooling layer.

        Args:
            pool_size (tuple, optional): pooling window size. Defaults to (2, 2).
            stride    (int,   optional): window step in the (h, w) directions.
                Defaults to pool_size.
            padding   (str,   optional): padding strategy, "SAME" or "VALID".
                Defaults to "VALID".
        """
        super().__init__()
        self.pool_size     = pool_size
        if stride is None:
            self.stride = self.pool_size
        else:
            if isinstance(stride, tuple):
                assert len(stride) == 2
                self.stride = stride
            elif isinstance(stride, int):
                self.stride = (stride, stride)
            else:
                raise ValueError("stride must be int or tuple")
        self.padding_mode  = padding
        self.padding       = None   # computed lazily on the first forward pass
        
    def forward(self, input):
        # NOTE(review): assumes `input` is laid out NHWC — the indexing
        # below takes axes 1 and 2 as spatial; confirm against callers.
        s_h, s_w = self.stride
        k_h, k_w = self.pool_size
        n,h,w,c  = input.shape
        
        # zero-padding
        if self.padding is None:
            self.padding = get_padding_2d(
                (h, w), (k_h, k_w), self.padding_mode)
        #   NHWC: self.padding = ((0,0),(h_pad_before, h_pad_after),(w_pad_before, w_pad_after),(0, 0))
        X = np.pad(input, pad_width=self.padding, mode="constant")
        padded_h, padded_w = X.shape[1:3]

        out_h = (padded_h - k_h) // s_h + 1   # output tensor shape: (N, out_h, out_w, C)
        out_w = (padded_w - k_w) // s_w + 1
        
        # NOTE(review): `empty` is presumably np.empty re-exported by
        # `from maths import *` — TODO confirm.
        max_pool = empty((n, out_h, out_w, c))             # pooled output
        argmax   = empty((n, out_h, out_w, c), dtype=int)  # flat index of each max within its window, kept for backward()
        
        for row in range(out_h):
            win_top = row * s_h                            # top edge of the pooling window
            for col in range(out_w):
                win_lft = col * s_w                        # left edge of the pooling window
                # Slice one window of data, sized pool_size
                pool = X[:, win_top: win_top+k_h, win_lft: win_lft+k_w, :]
                pool = pool.reshape((n, -1, c))            # flatten the two middle (spatial) dims
                
                _argmax = np.argmax(pool, axis=1)[:, np.newaxis, :]   # (n, 1, c)
                argmax[:, row, col, :] = _argmax.squeeze()

                # pick the max element of each window
                _max_pool = np.take_along_axis(pool, _argmax, axis=1).squeeze()
                max_pool[:, row, col, :] = _max_pool
        self.ctx = {"X_shape": X.shape, "out_shape": (out_h, out_w), "argmax" : argmax}
        return max_pool
    
    def backward(self, grad):
        """Scatter the incoming gradient back onto the argmax positions.

        Args:
            grad (np.ndarray): loss gradient w.r.t. the pooled output,
                shape (N, out_h, out_w, C).

        Returns:
            np.ndarray: loss gradient w.r.t. the (unpadded) forward input.
        """
        n, h, w, c   = self.ctx['X_shape']       # padded input shape
        out_h, out_w = self.ctx['out_shape']
        s_h, s_w     = self.stride
        k_h, k_w     = self.pool_size
        k_sz         = k_h * k_w                 # number of elements per pooling window
        pad_h, pad_w = self.padding[1:3]
        
        d_in = np.zeros(shape=(n, h, w, c), dtype=np.float32)
        for row in range(out_h):
            win_top = row * s_h                  # top edge of the pooling window
            for col in range(out_w):
                win_lft = col * s_w              # left edge of the pooling window
                _argmax = self.ctx['argmax'][:, row, col, :]
                
                # one-hot over the window positions: (N, C, k_sz) -> (N, k_sz, C)
                mask = np.eye(k_sz)[_argmax].transpose((0, 2, 1))
                _grad = grad[:, row, col, :][:, np.newaxis, :]
                

                patch = np.repeat(_grad, k_sz, axis=1) * mask  # broadcast _grad over the window, keep only the max slot
                patch = patch.reshape((n, k_h, k_w, c))
                
                # Adjacent windows can select the same input value, hence +=
                d_in[:, win_top : win_top + k_h, win_lft : win_lft + k_w, :] += patch
        
        # Strip the zero padding; h and w here are the padded sizes
        return d_in[:, pad_h[0]: h-pad_h[1], pad_w[0]: w-pad_w[1], :]

class Reshape(Layer):
    """Reshape the input tensor to ``(batch, *shape)``."""

    def __init__(self, *shape) -> None:
        super().__init__()
        self.oshape = shape   # target shape (without the batch dim)
        self.ishape = None    # original input shape, recorded by forward()

    def forward(self, input):
        # BUG FIX: the original stored the shape in `self.in_shape` but
        # read `self.ishape`, which was still None and raised TypeError
        # on every call; backward() reads `self.ishape` too.
        self.ishape = input.shape
        return input.reshape(self.ishape[0], *self.oshape)

    def backward(self, grad):
        # The gradient of a reshape is simply the gradient reshaped back
        # to the input's shape.
        return grad.reshape(self.ishape)


class Flatten(Reshape):
    """Collapse all non-batch dimensions into one: (N, ...) -> (N, -1)."""

    def __init__(self):
        super().__init__(-1)


class Dropout(Layer):
    """Inverted dropout: randomly zero activations during training."""

    def __init__(self, keep_ratio=0.5):
        """
        Args:
            keep_ratio (float): probability of keeping a unit active.
        """
        super().__init__()
        self.keep_ratio = keep_ratio
        # Scaled keep/drop mask from the most recent forward pass.
        # BUG FIX: the original initialized `self.multiplier`, but the
        # forward/backward passes use `self._multiplier`, so calling
        # backward() before any forward() raised AttributeError.
        self._multiplier = None

    def forward(self, input):
        if self._is_train:
            # One Bernoulli trial per element: 1 keeps the unit
            # (with probability keep_ratio), 0 drops it.
            multiplier = np.random.binomial(1, self.keep_ratio, input.shape)

            # Multiplying by a 0/1 mask randomly silences a subset of the
            # units, reducing co-adaptation and overfitting risk.

            # Inverted dropout: scale the kept units by 1/keep_ratio so
            # the expected activation is unchanged and inference needs
            # no extra scaling.
            self._multiplier = multiplier / self.keep_ratio
            output           = input * self._multiplier
        else:
            # Inference: identity.
            output = input
        return output

    def backward(self, grad):
        assert self.is_train is True
        # forward() takes a single input, so return a single gradient:
        # dL/dinput = dL/doutput * doutput/dinput = grad * mask.
        return grad * self._multiplier


class Activation(Layer):
    """Base class for element-wise activation layers.

    Subclasses implement ``func`` (the activation) and ``derivative``
    (its element-wise derivative).
    """

    def __init__(self):
        super().__init__()
        self.inputs = None

    def forward(self, inputs):
        # Stash the raw input; derivative() is evaluated on it later.
        self.ctx["X"] = inputs
        return self.func(inputs)

    def backward(self, grad):
        # Element-wise chain rule: dL/dx = f'(x) * dL/dy.
        return grad * self.derivative(self.ctx["X"])

    def func(self, x):
        raise NotImplementedError

    def derivative(self, x):
        raise NotImplementedError


class Sigmoid(Activation):
    """Logistic sigmoid activation."""

    def func(self, x):
        return sigmoid(x)

    def derivative(self, x):
        # Element-wise operator, so symbolic differentiation applies:
        # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)).
        s = self.func(x)
        return s * (1.0 - s)


class Softplus(Activation):
    """Softplus activation: log(1 + exp(x)), computed stably."""

    def func(self, x):
        # max(x, 0) + log(1 + exp(-|x|)) is the overflow-safe form of
        # log(1 + exp(x)) for large |x|.
        return np.maximum(x, 0.0) + np.log(1.0 + np.exp(-np.abs(x)))

    def derivative(self, x):
        # d/dx softplus(x) = sigmoid(x).
        return sigmoid(x)


class Tanh(Activation):
    """Hyperbolic tangent activation."""

    def func(self, x):
        return np.tanh(x)

    def derivative(self, x):
        # d/dx tanh(x) = 1 - tanh(x)^2.
        y = self.func(x)
        return 1.0 - y ** 2


class ReLU(Activation):
    """Rectified linear unit: max(x, 0)."""

    def func(self, x):
        return np.maximum(0.0, x)

    def derivative(self, x):
        # Boolean mask; it promotes to 0/1 when multiplied by the
        # incoming gradient (the derivative at exactly 0 is taken as 0).
        return x > 0.0


class LeakyReLU(Activation):
    """Leaky ReLU: x for x >= 0, slope * x for x < 0."""

    def __init__(self, slope=0.2):
        super().__init__()
        self._slope = slope

    def func(self, x):
        out = x.copy()          # don't mutate the caller's tensor
        negative = out < 0.0
        out[negative] *= self._slope
        return out

    def derivative(self, x):
        # 1 on the positive side, slope on the negative side; keeps the
        # input's dtype via ones_like.
        dx = np.ones_like(x)
        dx[x < 0.0] = self._slope
        return dx


class GELU(Activation):
    """Gaussian Error Linear Unit, sigmoid approximation.

    GELU(x) ~= x * sigmoid(1.702 * x)
    ref: https://arxiv.org/pdf/1606.08415.pdf
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: the paper's sigmoid approximation uses 1.702; the
        # original code had 0.1702 (off by a factor of 10), which makes
        # the activation nearly linear instead of GELU-shaped.
        self._alpha = 1.702
        self._cache = None

    def func(self, x):
        # Cache sigmoid(alpha * x) for reuse in derivative().
        self._cache = sigmoid(self._alpha * x)
        return x * self._cache

    def derivative(self, x):
        # Product rule on x * s(a*x): s + x * a * s * (1 - s).
        return self._cache + x * self._alpha * self._cache * (1.0 - self._cache)


class ELU(Activation):
    """Exponential Linear Unit: x for x > 0, alpha*(exp(x)-1) for x <= 0."""

    def __init__(self, alpha=1.0):
        super().__init__()
        self._alpha = alpha

    def func(self, x):
        return np.maximum(x, 0) + np.minimum(0, self._alpha * (np.exp(x) - 1))

    def derivative(self, x):
        # BUG FIX: comparison binds looser than + and *, so the original
        # `x > 0.0 + (x < 0.0) * alpha * exp(x)` parsed as `x > (...)`
        # and returned a boolean array instead of the derivative.
        # Correct: f'(x) = 1 for x > 0, alpha * exp(x) for x < 0.
        return (x > 0.0) + (x < 0.0) * self._alpha * np.exp(x)
    

def get_padding_2d(in_shape, k_shape, mode):
    """Compute an np.pad-compatible padding spec for an NHWC tensor.

    Args:
        in_shape: (height, width) of the input feature map.
        k_shape:  (kernel_h, kernel_w) of the pooling/convolution window.
        mode:     "SAME" pads so a stride-1 sweep preserves the spatial
                  size; any other value ("VALID") adds no padding.

    Returns:
        A 4-tuple of (before, after) pairs for the N, H, W, C axes.
    """
    def pad_1d(size, kernel):
        if mode != "SAME":
            return (0, 0)
        # Stride-1 SAME padding needs kernel - 1 pixels in total, split
        # as evenly as possible with the odd extra pixel trailing.
        total = (size - 1) + kernel - size
        before = total // 2
        after = total - before
        return (before, after)

    return (0, 0), pad_1d(in_shape[0], k_shape[0]), pad_1d(in_shape[1], k_shape[1]), (0, 0)