# @Time : 2021/6/19 20:30
# @Author : Li Kunlun
# @Description : 快速搭建神经网络
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.autograd import Variable

# 1. Create the dataset: two Gaussian clusters in the plane, one per class.
torch.manual_seed(1)  # fix the RNG so the fake data is reproducible

# make fake data
n_data = torch.ones(100, 2)

x0 = torch.normal(2 * n_data, 1)   # class0 x data (tensor), shape=(100, 2), centered at (2, 2)
y0 = torch.zeros(100)              # class0 labels (tensor), shape=(100,)
x1 = torch.normal(-2 * n_data, 1)  # class1 x data (tensor), shape=(100, 2), centered at (-2, -2)
y1 = torch.ones(100)               # class1 labels (tensor), shape=(100,)

# Stack the two clusters. Features must be float32; class labels must be
# int64 (LongTensor) because torch's classification losses expect that.
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # shape (200, 2), FloatTensor = 32-bit floating
y = torch.cat((y0, y1), 0).type(torch.LongTensor)   # shape (200,), LongTensor = 64-bit integer

# 2、建立神经网络
"""
搭建神经网络的方法1：用 class 继承了一个 torch 中的神经网络结构, 然后对其进行了修改
"""


class Net(torch.nn.Module):
    """A minimal two-layer feed-forward classifier.

    Architecture: Linear(n_feature -> n_hidden) -> ReLU -> Linear(n_hidden -> n_output).
    The output is raw scores (no softmax applied here).
    """

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Both layers get default torch.nn.Linear weight initialization.
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # input -> hidden
        self.out = torch.nn.Linear(n_hidden, n_output)      # hidden -> output

    def forward(self, x):
        # ReLU non-linearity on the hidden layer only; the output layer is linear.
        hidden_activation = F.relu(self.hidden(x))
        return self.out(hidden_activation)


# Instantiate the class-based network: 2 input features, 10 hidden units, 2 classes.
net = Net(n_feature=2, n_hidden=10, n_output=2)

# Printing a torch.nn.Module shows its layer structure:
# Net(
#   (hidden): Linear(in_features=2, out_features=10, bias=True)
#   (out): Linear(in_features=10, out_features=2, bias=True)
# )
print(net)

"""
搭建神经网络的方法2：用 class 继承了一个 torch 中的神经网络结构, 然后对其进行了修改
"""
net2 = torch.nn.Sequential(
    # 按照顺序搭建神经网络，激励函数当成一层加在里面
    torch.nn.Linear(2, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 2)
)
"""
1、结构输出结果
Sequential(
  (0): Linear(in_features=2, out_features=10, bias=True)
  (1): ReLU()
  (2): Linear(in_features=10, out_features=2, bias=True)
)
2、net和net2结构对比：
激励函数也一同纳入进去了, 但是 net 中, 激励函数实际上是在 forward() 功能中才被调用的. 
这也就说明了, 相比 net2, net的好处就是, 可以根据你的个人需要更加个性化你自己的前向传播过程, 比如(RNN)。
"""
print(net2)
