import torch


class NeuralNetwork(torch.nn.Module):
    """A small fully connected feed-forward network (MLP).

    Default architecture:
        input -> Linear(num_inputs, 30) -> ReLU
              -> Linear(30, 20)         -> ReLU
              -> Linear(20, num_outputs)

    Each Linear layer performs an affine transform z = a @ W.T + b:
      - a: previous layer's output, shape (batch_size, in_features)
      - W: weight matrix, shape (out_features, in_features)
      - b: bias vector, shape (out_features,)
      - z: layer output, shape (batch_size, out_features)

    Without a nonlinearity, any stack of linear layers collapses into a
    single linear map and cannot fit nonlinear relationships. ReLU(x) =
    max(0, x) supplies the nonlinearity; its gradient is 1 for inputs >= 0
    and 0 for inputs < 0.
    """

    def __init__(self, num_inputs: int, num_outputs: int,
                 hidden_sizes: tuple = (30, 20)) -> None:
        """Build the layer stack.

        :param num_inputs: number of input features per sample
        :param num_outputs: number of output logits per sample
        :param hidden_sizes: widths of the hidden layers; the default
            (30, 20) reproduces the original fixed architecture.
        """
        super().__init__()
        layers = []
        in_features = num_inputs
        for width in hidden_sizes:
            layers.append(torch.nn.Linear(in_features, width))
            layers.append(torch.nn.ReLU())
            in_features = width
        # Output layer emits raw logits; apply softmax externally if needed.
        layers.append(torch.nn.Linear(in_features, num_outputs))
        self.layers = torch.nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run a forward pass through the network.

        :param x: input tensor of shape (batch_size, num_inputs)
        :return: logits of shape (batch_size, num_outputs)
        """
        logits = self.layers(x)
        return logits


def test_a5():
    """Demo: build the MLP, inspect its parameters, and run inference.

    NOTE(review): the original version seeded the RNG *after* constructing
    the model, so the weight values quoted in its comments were not
    reproducible (and its two quoted outputs for the same input
    contradicted each other). Seeding first makes both weight
    initialization and the random input deterministic. Exact values may
    still vary across PyTorch versions.
    """
    # Seed before model construction so weight init is reproducible.
    torch.manual_seed(123)
    model = NeuralNetwork(num_inputs=50, num_outputs=3)

    # Printing the module shows the Sequential structure:
    #   (0) Linear(50 -> 30), (1) ReLU,
    #   (2) Linear(30 -> 20), (3) ReLU,
    #   (4) Linear(20 -> 3)
    print("model: ", model)

    # 50*30+30 + 30*20+20 + 20*3+3 = 2213 trainable parameters.
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Total number of trainable model parameters: ", num_params)

    # Linear weights are stored as (out_features, in_features): (30, 50).
    print("layer 0 weight's shape: ", model.layers[0].weight.shape)
    print("layer 0 weight: ", model.layers[0].weight)

    # The bias has shape (out_features,): (30,).
    print("layer 0 bias: ", model.layers[0].bias.shape)
    print("layer 0 bias: ", model.layers[0].bias)

    # Forward pass with gradient tracking enabled: the output tensor
    # carries grad_fn, the last function used to compute it in the
    # autograd computation graph (e.g. AddmmBackward0).
    x = torch.randn((1, 50))
    out = model(x)
    print(out)

    # For pure inference, disable gradient bookkeeping with
    # torch.no_grad(); the values equal `out` exactly — only the autograd
    # metadata (grad_fn) is absent.
    print("model inputs x: ", x)
    with torch.no_grad():
        out2 = model(x)
    print(out2)

    # softmax over dim=1 (the class dimension) turns logits into
    # probabilities that sum to 1 per row.
    activate_out = torch.softmax(out2, dim=1)
    print("softmax: ", activate_out)
