# -*- coding: utf-8 -*-
"""
# @file name    : grad_vanish_explode.py
# @author       : QuZhang
# @date         : 2020-12-17 14:36
# @brief        : 梯度消失与爆炸实验 (gradient vanishing & explosion experiment)
"""
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
from tools.common_tools import set_seed
import torch.nn as nn
import torch
import numpy as np


set_seed(1)

class MLP(nn.Module):
    """A stack of `layers` identical fully-connected layers, used to observe
    gradient vanishing / explosion by printing the activation std after each
    layer in the forward pass."""

    def __init__(self, neural_num: int, layers: int):
        """Build `layers` Linear layers, each mapping neural_num -> neural_num.

        bias=False keeps the variance analysis clean: only the weight
        initialization scheme affects the per-layer output std.
        """
        super().__init__()
        self.linears = nn.ModuleList(
            [nn.Linear(neural_num, neural_num, bias=False) for _ in range(layers)]
        )
        # Kept so initialization schemes can scale by layer width.
        self.neural_num = neural_num

    def forward(self, x):
        """Run `x` through every layer (Linear -> ReLU), printing the std of
        the activations after each layer; stop early once the std becomes NaN
        (i.e. the activations have exploded)."""
        for i, linear in enumerate(self.linears):
            x = torch.relu(linear(x))
            print("layer: {}, std:{}".format(i, x.std()))
            if torch.isnan(x.std()):
                print("output is nan in {} layers".format(i))
                break
        return x

    def initialize(self):
        """Initialize every Linear layer's weights in place.

        Schemes explored in the experiment (for reference):
          - plain standard normal:  nn.init.normal_(m.weight.data)
          - scaled normal:          std = sqrt(1 / neural_num)
          - manual Xavier uniform:  a = gain * sqrt(6 / (fan_in + fan_out)),
                                    with gain = nn.init.calculate_gain('tanh')
          - built-in Xavier:        nn.init.xavier_uniform_(m.weight.data, gain=...)

        The active scheme is Kaiming normal, which is the one matched to the
        ReLU activation used in forward().
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight.data)


if __name__ == '__main__':
    # ---- Experiment 1: forward std through a 100-layer MLP ----
    # Toggle `flag` to enable; disabled by default.
    flag = False
    # flag = True
    if flag:
        layer_nums = 100
        neural_nums = 256
        batch_size = 16
        net = MLP(neural_nums, layer_nums)  # build the model
        net.initialize()
        inputs = torch.randn((batch_size, neural_nums))  # inputs: mean=0, std=1

        output = net(inputs)
        print(output)

    # =========== empirical activation gain: calculate gain =========
    flag = True
    if flag:
        x = torch.randn(10000)
        out = torch.tanh(x)

        # Empirical gain of tanh: input std / output std.
        # NOTE(review): the original comment said "variance", but the code
        # computes a ratio of standard deviations — which is the convention
        # nn.init.calculate_gain uses, so the code is correct.
        gain = x.std() / out.std()
        print("gain:{}".format(gain))

        tanh_gain = nn.init.calculate_gain('tanh')
        print("tanh_gain in PyTorch: ", tanh_gain)