import torch
import random
from d2l import torch as d2l


# 1. Generate the dataset
def systhetic_data(w, b, num_example):
    """Generate y = Xw + b + Gaussian noise.

    Returns features of shape (num_example, len(w)) and labels of
    shape (num_example, 1).  (NOTE: name is a long-standing typo of
    "synthetic_data"; kept because callers use it.)
    """
    # Features drawn i.i.d. from a standard normal distribution.
    features = torch.normal(0, 1, (num_example, len(w)))
    # Linear model output plus small Gaussian noise (std 0.01).
    targets = features @ w + b
    targets += torch.normal(0, 0.01, targets.shape)
    return features, targets.reshape((-1, 1))

# Ground-truth parameters used to synthesize the data; the training loop
# below should recover values close to these.
true_w = torch.tensor([2,-3.4])
true_b = 4.2
features, labels = systhetic_data(true_w, true_b, 1000)
# features: [1000, 2]; labels: [1000, 2] x [2, 1] + b -> [1000, 1]

print("features:", features[0], "\nlabels:", labels[0])  # show the first example pair
print(features.shape)  # feature size: [1000, 2]
d2l.set_figsize()
# Scatter the second feature against the labels; tensors must be
# .detach()-ed before .numpy() because labels carries no grad but this is
# the conventional safe pattern.
d2l.plt.scatter(features[:, (1)].detach().numpy(), labels.detach().numpy(), 1, c='g')
# d2l.plt.scatter(features[:, (0)].detach().numpy(), labels.detach().numpy(), 1, c='b')

# d2l.plt.show()  # required to actually display the figure


# 2. Read the dataset in random minibatches, so the loss (and its
#    gradient) can be computed over a batch at a time, e.g. on a GPU.
def data_iter(batch_size, features, labels):
    """Yield (X, y) minibatches in a random order, without replacement.

    The last batch may be smaller when batch_size does not divide the
    number of examples.
    """
    total = len(features)
    order = list(range(total))
    # Shuffle the indices so each pass visits examples in a fresh order.
    random.shuffle(order)
    start = 0
    while start < total:
        # Slicing past the end is safe in Python, so no min() is needed;
        # the index list becomes a tensor for fancy indexing below.
        picked = torch.tensor(order[start:start + batch_size])
        yield features[picked], labels[picked]
        start += batch_size

batch_size = 10

# Peek at a single minibatch to see what the iterator yields.
X, y = next(iter(data_iter(batch_size, features, labels)))
print(X, "\n", y)

# 3. Initialize the model parameters: small random weights, zero bias.
#    Both require gradients so SGD can update them in place.
w = torch.normal(0, 0.01, (2, 1), requires_grad=True)
b = torch.zeros(1).requires_grad_()

# 4. Define the model: a single linear layer computed by hand.
def linreg(X, w, b):
    """Return the affine prediction X @ w + b (b broadcasts over rows)."""
    return X @ w + b


# 5. Loss function: elementwise halved squared error, l2 = (y_hat - y)**2 / 2
def squared_loss(y_hat, y):
    """Return (y_hat - y)^2 / 2 elementwise; y is reshaped to match y_hat."""
    residual = y_hat - y.reshape(y_hat.shape)
    return residual ** 2 / 2


# 6. Optimizer: minibatch stochastic gradient descent.
#    params are the tensors to update (e.g. the weight w and bias b).
def sgd(params, lr, batch_size):
    """Take one in-place SGD step on each parameter, then clear its grad.

    Dividing by batch_size turns a summed minibatch gradient into a mean
    step (the training loop calls backward() on a summed loss).
    """
    # Parameter updates must not themselves be tracked by autograd.
    with torch.no_grad():
        for p in params:
            p.sub_(lr * p.grad / batch_size)  # step downhill to reduce loss
            p.grad.zero_()  # reset so the next backward() starts fresh



# 7. Train the model.
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss

for epoch in range(num_epochs):
    # One full pass over the data in random minibatches.
    for X, y in data_iter(batch_size, features, labels):
        batch_loss = loss(net(X, w, b), y)
        # Sum to a scalar first: backward() differentiates a scalar.
        batch_loss.sum().backward()
        # One SGD step using the freshly accumulated gradients.
        sgd([w, b], lr, batch_size)

    # Evaluate on the whole dataset without tracking gradients.
    with torch.no_grad():
        train_l = loss(net(features, w, b), labels)
        print(f'epoch{epoch + 1}, loss {float(train_l.mean()):f}')

# Compare the learned parameters against the ground truth.
print(f"w的估计误差：{true_w - w.reshape(true_w.shape)}")
print(f"b的估计误差：{true_b - b}")

 