import torch
from torch.autograd import Variable

"""
PyTorch： 自定义模块

有时你想要指定比现有模块序列更复杂的模型；对于这种情况，你可以通过继承 nn.Module 来定义自己的模块，并实现 forward 函数，他接收一个输入变量，并使用其他模块或 autograd 操作符来生成输出变量。
在这个例子中，我们实现了一个两层网络来作为自定义模块：
"""

# 两层网络模型
class TwoLayerNet(torch.nn.Module):
    """A fully-connected two-layer network: Linear -> ReLU -> Linear.

    Subclassing nn.Module lets us compose submodules and tensor
    operations in forward(); autograd derives the backward pass.
    """

    def __init__(self, D_in, H, D_out):
        """Instantiate two nn.Linear modules and register them as submodules.

        D_in: input feature size; H: hidden layer size; D_out: output size.
        """
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        """Map a batch of inputs to a batch of predictions.

        Applies the first linear layer, a ReLU nonlinearity, and the
        second linear layer, in that order.
        """
        hidden = torch.relu(self.linear1(x))
        return self.linear2(hidden)


# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random tensors to hold inputs and outputs.
# NOTE: since PyTorch 0.4 (which this file already targets via
# MSELoss(reduction='sum')), plain tensors support autograd directly,
# so the deprecated Variable wrapper is unnecessary.
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)  # targets: requires_grad defaults to False

# Construct our model by instantiating the class defined above.
model = TwoLayerNet(D_in, H, D_out)

# Construct the loss function and an optimizer. model.parameters() in the
# SGD constructor yields the learnable parameters of the two nn.Linear
# submodules that are members of the model.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
    y_pred = model(x)

    # Compute and report the loss. Use loss.item() to read the scalar
    # value; the .data attribute is deprecated and bypasses autograd's
    # bookkeeping.
    loss = criterion(y_pred, y)
    print(t, loss.item())

    # Zero gradients, run the backward pass, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()