# Reference article: https://blog.csdn.net/sdu_hao/article/details/103099552

import torch
import torch.nn as nn
import torch.nn.functional as F  # needed for F.relu in the Net class below
import numpy as np
import sys

sys.path.append(".")
import d2lzh_pytorch as d2l  # helper package from Dive into Deep Learning (PyTorch edition)


def init_params():
    # Randomly initialize the weight vector and zero-initialize the bias,
    # both tracked by autograd
    w = torch.randn((num_inputs, 1), requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    return [w, b]


def l2_penalty(w):
    # L2 penalty: ||w||^2 / 2; the factor 1/2 makes the gradient simply w
    return (w ** 2).sum() / 2
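
# A quick sanity check (illustrative, not part of the original script):
# for w = [3, 4] the penalty is (3^2 + 4^2) / 2 = 12.5.
assert l2_penalty(torch.tensor([3.0, 4.0])).item() == 12.5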


# This function can be saved in the d2lzh package for later reuse
def linreg(X, w, b):
    return X.mm(w) + b
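
# Shape sanity check (illustrative, not part of the original script): mm is a
# plain matrix product, and the scalar bias b broadcasts over the batch.
assert linreg(torch.randn(5, 3), torch.randn(3, 1), torch.zeros(1)).shape == (5, 1)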


# This function can be saved in the d2lzh package for later reuse
def squared_loss(y_hat, y):
    # Returns a vector of per-example losses (no averaging here);
    # note that PyTorch's built-in MSELoss does not divide by 2
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
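
# To illustrate the factor-of-2 difference mentioned above (a sketch, not part
# of the original script): averaging squared_loss gives exactly half of MSELoss.
_y_hat, _y = torch.randn(4, 1), torch.randn(4, 1)
assert torch.allclose(squared_loss(_y_hat, _y).mean(), nn.MSELoss()(_y_hat, _y) / 2)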


# This function can be saved in the d2lzh package for later reuse
def sgd(params, lr, batch_size):
    for param in params:
        # Note: param is updated through param.data so the step itself is not tracked by autograd
        param.data -= lr * param.grad / batch_size
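

# An equivalent formulation (a sketch, not part of the original script): newer
# PyTorch code typically wraps the update in torch.no_grad() instead of
# touching the .data attribute directly.
def sgd_no_grad(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size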


# Hyperparameters for the from-scratch weight-decay experiment
batch_size, num_epochs, lr = 1, 100, 0.003
net, loss = d2l.linreg, d2l.squared_loss


def fit_and_plot(lambd):
    w, b = init_params()
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            # Add the L2-norm penalty term to the loss
            l = loss(net(X, w, b), y) + lambd * l2_penalty(w)
            l = l.sum()

            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()
            l.backward()
            d2l.sgd([w, b], lr, batch_size)
        train_ls.append(loss(net(train_features, w, b), train_labels).mean().item())
        test_ls.append(loss(net(test_features, w, b), test_labels).mean().item())
    d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
                 range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('L2 norm of w:', w.norm().item())
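
# Why the from-scratch penalty above and the optimizer-level weight_decay used
# further below are equivalent: with loss l(w) + lambd * ||w||^2 / 2, one SGD
# step becomes
#   w <- w - lr * (grad_l + lambd * w) = (1 - lr * lambd) * w - lr * grad_l,
# i.e. the weights "decay" by the factor (1 - lr * lambd) before the usual
# gradient step, which is exactly what weight_decay in torch.optim.SGD does.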

# Synthetic high-dimensional linear regression data: y = 0.05 + 0.01 * sum(x)
# plus Gaussian noise. With only n_train = 20 examples and 200 features, the
# model overfits badly unless it is regularized.
n_train, n_test, num_inputs = 20, 100, 200
true_w, true_b = torch.ones(num_inputs, 1) * 0.01, 0.05

features = torch.randn((n_train + n_test, num_inputs))
labels = torch.matmul(features, true_w) + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
train_features, test_features = features[:n_train, :], features[n_train:, :]
train_labels, test_labels = labels[:n_train], labels[n_train:]

dataset = torch.utils.data.TensorDataset(train_features, train_labels)
train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
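
# Illustrative usage (these calls are not in the original script, which defines
# fit_and_plot but never invokes it): with lambd=0 the model overfits, while
# lambd=3 shrinks the L2 norm of w and narrows the train/test gap.
fit_and_plot(lambd=0)
fit_and_plot(lambd=3)

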
class Net(torch.nn.Module):
    # A small standalone MLP (1 input -> 20 hidden -> 1 output). It is never
    # trained on the 200-feature data above; only the MSELoss created next to
    # it is reused by fit_and_plot_pytorch below.
    def __init__(self):
        super().__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x
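
# Quick forward-pass check for the standalone Net (illustrative, not part of
# the original script): one input feature in, one prediction out.
assert Net()(torch.randn(8, 1)).shape == (8, 1)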


net_SGD = Net()
net, loss = net_SGD, torch.nn.MSELoss()

def fit_and_plot_pytorch(wd):
    # Decay only the weight parameters (their names usually end with "weight")
    net = nn.Linear(num_inputs, 1)
    # Custom parameter initialization
    nn.init.normal_(net.weight, mean=0, std=1)
    nn.init.normal_(net.bias, mean=0, std=1)
    optimizer_w = torch.optim.SGD(params=[net.weight], lr=lr, weight_decay=wd)  # weight decay on the weight
    optimizer_b = torch.optim.SGD(params=[net.bias], lr=lr)  # no weight decay on the bias
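
    # An equivalent single-optimizer setup (a sketch, not from the original
    # script) uses parameter groups so that only the weight is decayed:
    #   optimizer = torch.optim.SGD(
    #       [{'params': [net.weight], 'weight_decay': wd},
    #        {'params': [net.bias]}], lr=lr)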

    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X), y).mean()
            optimizer_w.zero_grad()
            optimizer_b.zero_grad()

            l.backward()

            # Call step() on each optimizer instance so the weight and the bias are updated separately
            optimizer_w.step()
            optimizer_b.step()
        train_ls.append(loss(net(train_features), train_labels).mean().item())
        test_ls.append(loss(net(test_features), test_labels).mean().item())
    # d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
    #              range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('L2 norm of w:', net.weight.data.norm().item())

fit_and_plot_pytorch(2)
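
# For comparison (not in the original script): with wd=0 there is no weight
# decay, so the printed L2 norm of w stays much larger and the model overfits
# the 20 training examples.
fit_and_plot_pytorch(0)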