##参考教程https://blog.csdn.net/Campsisgrandiflora/article/details/114192379
#https://blog.csdn.net/Campsisgrandiflora/article/details/114178247
import torch

import torch
import torch.nn as nn
import torch.utils.data as Data


class RegressionModel(nn.Module):
    """A two-layer feed-forward network for simple regression.

    Architecture: Linear(n_input -> n_hidden) -> ReLU -> Linear(n_hidden -> n_output).
    """

    def __init__(self, n_input, n_hidden, n_output):
        super(RegressionModel, self).__init__()
        # Hidden projection, non-linearity, and output projection.
        self.linear1 = nn.Linear(n_input, n_hidden)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(n_hidden, n_output)

    def forward(self, input):
        """Map `input` through the hidden layer, ReLU, and output layer."""
        return self.linear2(self.relu(self.linear1(input)))

# # Data
# Raw samples: 50 values of x evenly spaced in [1, 10], targets y = x^2.
x = torch.linspace(1, 10, 50).unsqueeze(1)
y = x ** 2

# Pair the inputs and targets in a TensorDataset.
dataset = Data.TensorDataset(x, y)

# Serve the dataset in mini-batches of 10 via a DataLoader.
dataloader = Data.DataLoader(dataset=dataset, batch_size=10)


# # Model
# Network shape: 1 input feature -> 10 hidden units -> 1 output.
model = RegressionModel(1, 10, 1)

# Plain SGD over all model parameters with a small fixed learning rate.
optim = torch.optim.SGD(model.parameters(), lr=0.0001)

# Mean-squared-error loss.  The legacy `reduce=True` argument is deprecated
# in PyTorch; `reduction='mean'` is the modern spelling of the same default
# behavior (average the squared errors over the batch).
loss_f = nn.MSELoss(reduction='mean')

# # Training
for e in range(5000):
    epoch_loss = 0
    for i, (inp, out) in enumerate(dataloader):
        optim.zero_grad()

        predict = model(inp)
        loss = loss_f(predict, out)

        loss.backward()

        optim.step()
        # Use .item() to extract the Python scalar; accumulating `loss.data`
        # relies on deprecated Variable-era semantics.
        epoch_loss += loss.item()

    if e % 500 == 0:
        # len(dataloader) is the number of mini-batches per epoch
        # (was hard-coded as 50/10, which breaks if the data size changes).
        print('the loss of %d batch is : %f' % (e, epoch_loss / len(dataloader)))
print("上面的是一次batchsize 10的情况")

print("当我们将结果设置batchsize 1的情况就会退化成为随机梯度下降（伪）")
print("当我们将batchsize设置为数据集整体，就会变成批量梯度下降")
##批量梯度下降：在每次迭代时，用整个数据集的所有样本上的梯度计算更新。
#随机梯度下降：在每次迭代时，用单个样本上的梯度计算更新。
#小批量梯度下降：在每次迭代时，用多个但不是全部样本上的梯度计算更新。通常也简称为随机方法。
#参考文献https://blog.csdn.net/sinat_33761963/article/details/104327122
