# 因为层的参数需要保存值和对应的梯度，这里定义梯度，可训练的参数全部以Tensor的类别保存

import numpy as np

np.random.seed(10001)

'''
在梯度下降/随机梯度下降中，用 w 的导数乘以学习率来更新参数。简单的梯度可以直接这样计算；如果是多层的复杂计算，则分为两步：
1.累计计算每个梯度然后推算到最终的结果梯度
2.迭代传递梯度使得损失最小
'''
class Tensor:
    """Pair a value array with its gradient buffer.

    Trainable layer parameters are stored as Tensor instances so that the
    parameter value and its accumulated gradient always travel together.
    """

    def __init__(self, shape):
        # Value buffer, zero-initialised float32.
        self.data = np.zeros(shape, dtype=np.float32)
        # Gradient buffer with the same shape/dtype as the value buffer.
        self.grad = np.zeros(shape, dtype=np.float32)

    def clear_grad(self):
        """Reset the accumulated gradient to all zeros (call between steps)."""
        self.grad = np.zeros(self.grad.shape, dtype=self.grad.dtype)

    def __str__(self):
        return "Tensor shape: {}, data: {}".format(self.data.shape, self.data)


# Tensor的初始化类，目前仅提供Normal初始化和Constant初始化
'''
回调方法call实现初始化，init为初始化参数
'''
class Initializer:
    """Base class for parameter initializers.

    Stores an optional default output ``shape`` and a display ``name``;
    subclasses implement ``__call__`` to produce the actual array.
    """

    def __init__(self, shape=None, name='initializer'):
        # Default output shape; subclasses may override it at call time.
        self.shape = shape
        # Human-readable identifier, returned by __str__.
        self.name = name

    def __call__(self, *args, **kwargs):
        # Concrete initialization logic lives in the subclasses.
        raise NotImplementedError

    def __str__(self):
        return self.name

'''
constant 初始化 卷积核 继承Initializer
'''
class Constant(Initializer):
    """Initializer that fills the requested shape with a constant value.

    The fill value defaults to 0.  A shape may be given either at
    construction time (forwarded to ``Initializer.__init__``) or at call
    time; the call-time shape takes precedence.
    """

    def __init__(self, value=0., name='constant initializer', *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)
        self.value = value  # constant used to fill the output array

    def __call__(self, shape=None, *args, **kwargs):
        """Return an array of the effective shape filled with ``self.value``.

        Raises AssertionError when no shape was supplied here or at
        construction time.
        """
        # `is not None` (not truthiness) so edge shapes like 0 or () are honoured.
        if shape is not None:
            self.shape = shape
        # Bug fix: validate the effective shape (self.shape), not the local
        # argument — a shape stored by __init__ is equally acceptable.
        assert self.shape is not None, "the shape of initializer must not be None."
        return self.value + np.zeros(shape=self.shape)

'''
随机初始化 np.random.normal 正态分布，输入均值和标准差。
assert 断言处理异常情况：条件不满足时抛出 AssertionError 并打印后面的信息
（注意：-O 优化模式下 assert 会被跳过，因此与 raise 主动抛出错误并不完全等价）
'''
class Normal(Initializer):
    """Initializer drawing samples from a normal (Gaussian) distribution.

    Samples come from ``np.random.normal(mean, std)``.  A shape may be
    given either at construction time (forwarded to
    ``Initializer.__init__``) or at call time; the call-time shape takes
    precedence.
    """

    def __init__(self, mean=0., std=0.01, name='normal initializer', *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)
        self.mean = mean  # distribution mean (loc)
        self.std = std    # distribution standard deviation (scale)

    def __call__(self, shape=None, *args, **kwargs):
        """Return a normally distributed array of the effective shape.

        Raises AssertionError when no shape was supplied here or at
        construction time.
        """
        # `is not None` (not truthiness) so edge shapes like 0 or () are honoured.
        if shape is not None:
            self.shape = shape
        # Bug fix: validate the effective shape (self.shape), not the local
        # argument — a shape stored by __init__ is equally acceptable.
        assert self.shape is not None, "the shape of initializer must not be None."
        return np.random.normal(self.mean, self.std, size=self.shape)