# NOTE(review): dead commented-out PyTorch weight-init snippet pasted above the
# file header — unrelated to the script below; TODO: remove once confirmed unneeded.
# def _initialize_weights(self):
#     # print(self.modules())
#
#     for m in self.modules():
#         print(m)
#         if isinstance(m, nn.Linear):
#             # print(m.weight.data.type())
#             # input()
#             # m.weight.data.fill_(1.0)
#             init.xavier_uniform_(m.weight, gain=1)
#             print(m.weight)
# -*- coding: utf-8 -*-
"""
# @file name  : grad_vanish_explod.py
# @brief      : 梯度消失与爆炸实验
"""
# example of the xavier weight initialization
from math import sqrt
from numpy import mean
from numpy.random import rand
# number of nodes in the previous layer
# Demonstrate Xavier (Glorot) uniform initialization: for a layer fed by
# `n` nodes, weights are drawn uniformly from [-1/sqrt(n), +1/sqrt(n)].
n = 10
# half-width of the Xavier interval
bound = 1.0 / sqrt(n)
lower, upper = -bound, bound
# draw 1000 uniform samples in [0, 1) ...
numbers = rand(1000)
# ... and affinely map them onto [lower, upper)
scaled = lower + numbers * (upper - lower)
# report the interval and the empirical statistics of the draw
print(lower, upper)
print(scaled.min(), scaled.max())
print(scaled.mean(), scaled.std())

# Plot the Xavier-uniform initialization bounds as a function of the number
# of inputs: the interval half-width is 1/sqrt(n), so the usable weight
# range shrinks as the fan-in grows.
# NOTE: the duplicate `from math import sqrt` was removed — sqrt is already
# imported at the top of the file.
from matplotlib import pyplot
# fan-in values from 1 to 100
values = list(range(1, 101))
# interval half-width 1/sqrt(n) for each fan-in
results = [1.0 / sqrt(n) for n in values]
# error-bar plot centered on 0 visualizes the shrinking init range
pyplot.errorbar(values, [0.0 for _ in values], yerr=results)
pyplot.show()
# Reference: https://machinelearningmastery.com/weight-initialization-for-deep-learning-neural-networks/

# Reference: https://blog.csdn.net/yanzhiwen2/article/details/123999351
