import torch
import numpy as np
from matplotlib import pyplot as plt
import torch.utils.data
# Dataset: 10,000 samples total — 7,000 for training, 3,000 for testing.
train_data = 7000
test_data = 3000
num_inputs = 500
true_w = torch.ones(num_inputs, 1) * 0.0056
true_b = 0.028

# Build the synthetic dataset: features drawn from a standard normal,
# labels from the ground-truth linear model.
features = torch.randn(train_data + test_data, num_inputs)
labels = torch.matmul(features, true_w) + true_b
# Add Gaussian observation noise (std 0.01) so the data is not perfectly linear.
noise = np.random.normal(0, 0.01, size=labels.size())
labels = labels + torch.tensor(noise, dtype=torch.float32)

# Split into train/test partitions.
train_features, test_features = features[:train_data, :], features[train_data:, :]
train_labels, test_labels = labels[:train_data], labels[train_data:]

# Mini-batch size shared by both data loaders.
batch_size = 10
train_iter = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(train_features, train_labels),
    batch_size, shuffle=True)
test_iter = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(test_features, test_labels),
    batch_size, shuffle=False)

def init_params():
    """Return freshly initialized model parameters [w, b], both tracking gradients."""
    weight = torch.randn((num_inputs, 1), requires_grad=True)
    bias = torch.zeros(1, requires_grad=True)
    return [weight, bias]

def net(X, w, b):
    """Linear regression forward pass: X @ w + b."""
    prediction = torch.mm(X, w)
    return prediction + b

def loss(y_hat, y):
    """Elementwise halved squared error; y is reshaped to match y_hat."""
    diff = y_hat - y.view(y_hat.size())
    return diff * diff / 2

def sgd(params, lr, batch_size):
    """One mini-batch SGD step: update each param in place by -lr * grad / batch_size.

    Operates on .data so the update itself is not tracked by autograd.
    """
    for p in params:
        p.data -= lr * p.grad / batch_size


# Number of training epochs.
num_epochs = 15
# Learning rate for SGD.
lr = 0.0005



def train():
    """Train the linear model with mini-batch SGD and plot per-epoch losses.

    Fixes over the original version:
    - the loss accumulators are reset at the start of each epoch (they were
      previously initialized once, so the curves showed cumulative running
      averages instead of per-epoch losses);
    - the test loss is averaged over the number of TEST samples (it was
      previously divided by the train-sample counter `n`);
    - the evaluation local no longer shadows the module-level `test_data`;
    - evaluation runs under torch.no_grad() so no graph is built.
    """
    w, b = init_params()
    train_ls, test_ls = [], []

    for epoch in range(num_epochs):
        # --- training pass ---
        train_loss_sum, n_train = 0.0, 0
        for X, y in train_iter:
            l = loss(net(X, w, b), y).sum()
            # Zero stale gradients before backprop (grads are None on the first step).
            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()
            l.backward()
            sgd([w, b], lr, batch_size)
            train_loss_sum += l.item()
            n_train += y.shape[0]
        train_ls.append(train_loss_sum / n_train)

        # --- evaluation pass (no gradients needed) ---
        test_loss_sum, n_test = 0.0, 0
        with torch.no_grad():
            for X, y in test_iter:
                test_loss_sum += loss(net(X, w, b), y).sum().item()
                n_test += y.shape[0]
        test_ls.append(test_loss_sum / n_test)

        print('epoch %d, w= %.4f, b= %.3f' % (epoch + 1, w.sum().item() / w.shape[0], b.sum().item() / b.shape[0]))

    # Plot the train/test loss curves over epochs.
    x = np.linspace(0, len(train_ls), len(train_ls))
    plt.plot(x, train_ls, label="train_loss", linewidth=2)
    plt.plot(x, test_ls, label="test_loss", linewidth=1.5)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.legend()
    plt.show()

train()

