"""第三节的学习，2024.9.15"""

import math
import toolpy
import numpy as np
import torch
import time
import random

"""
1. 当我们进行计算的时候，尽量利用线性代数库，而不是开销高昂的for循环
"""
#
# # 验证1 for循环的开销比线性代数运算大
# n = 100000
# a = torch.ones(n)
# b = torch.ones(n)
# c = torch.zeros(n)
#
# timer = toolpy.Timer() #计时器
#
# #开始计算，法一
# for i in range(n):
#     c[i] = a[i] + b[i]
# print("for循环运算花费时间：%.5f"%timer.stop()) #1.12s
#
# # 用矩阵运算
# timer.start()
# c = a+b
# print("矢量运算花费时间：%.5f"%timer.stop()) #0.5s

# """3.1.3 正态分布与平方损失"""
# def normal(x,mu,sigma):
#     p = 1/math.sqrt(2*math.pi*sigma**2)
#     return p*np.exp(-0.5/(sigma**2)*((x-mu)**2))
#
# x = np.arange(-7,7,0.01)

# # 均值和标准差对
# params = [(0,1),(0,2),(3,1)]
# d2l.plot(x,[normal(x,mu,sigma) for mu,sigma in params],
#          xlabel = 'x',
#          ylabel = 'p(x)',
#          figsize = (4.5,2.5),
#          legend = [f'mean{mu},std{sigma}' for mu,sigma in params]
#          )

""" synthetic_data(w,b,num_examples) 生成数据"""

n = 1000  # number of synthetic samples to generate below
def synthetic_data(w, b, num_examples):
    """Generate data for y = Xw + b + noise.

    Args:
        w: weight vector of shape (d,).
        b: scalar bias.
        num_examples: number of rows to generate.

    Returns:
        (X, Y): X of shape (num_examples, d) drawn from N(0, 1), and
        Y of shape (num_examples, 1) with N(0, 0.01) Gaussian noise added.
    """
    X = torch.normal(0, 1, (num_examples, len(w)))  # num_examples rows, len(w) columns
    # Bug fix: the original set `X.require_grad = True` — a typo for
    # `requires_grad` — which only attached an unused Python attribute.
    # Input data needs no gradient tracking, so the line is removed.
    Y = torch.matmul(X, w) + b
    Y += torch.normal(0, 0.01, Y.shape)  # observation noise, std 0.01
    return X, Y.reshape(-1, 1)

true_w = torch.tensor([3.4,-2.1])  # ground-truth weights used to synthesize the dataset
true_b = 7.5  # ground-truth bias

features,labels = synthetic_data(true_w,true_b,n)  # features: (n, 2), labels: (n, 1)

# d2l.set_figsize()
# toolpy.plot_scatter(features[:,1],labels,1)

def data_iter(batch_size, features, labels):
    """Generator that yields shuffled minibatches of (features, labels).

    The example order is randomized once per call, then consumed in
    consecutive slices of at most `batch_size` rows; the final batch
    may be smaller.
    """
    order = list(range(len(features)))
    random.shuffle(order)  # randomize the visiting order

    for start in range(0, len(order), batch_size):
        # Python slicing already clamps at the end of the list.
        chunk = order[start:start + batch_size]
        yield features[chunk], labels[chunk]

# Try browsing the data in minibatches with our own iterator
batch_size = 10  # minibatch size for data_iter / training
#
# for X,y in data_iter(batch_size,features,labels):
#     print(X,"\n",y)
#     break

"""
第一个，线性预测模型写
模型网络： 线性函数
数据集：随机生成数
损失函数： 均方误差
优化： 批量梯度下降
"""

def linreg(X, w, b):
    """Linear regression model: predict X @ w + b."""
    prediction = X @ w
    return prediction + b

def squard_loss(y_pre, y_real):
    """Mean squared error between predictions and targets."""
    diff = y_pre - y_real
    return (diff ** 2).mean()

def sgd(params, lr, batch_size):
    """Minibatch stochastic gradient descent: update params in place.

    Each parameter's gradient is assumed to hold the batch-summed
    derivative; dividing by `batch_size` makes `lr` act per-sample.
    NOTE(review): `squard_loss` in this file already takes the mean,
    so pairing it with this division scales the step by 1/batch_size
    twice — confirm which loss-reduction convention is intended.
    """
    with torch.no_grad():  # parameter updates must not be traced
        for p in params:
            p -= lr * p.grad / batch_size
            p.grad.zero_()  # clear the gradient before the next backward pass

# lr = 0.1 # 学习率
# num_epoch = 5 #学习轮次
# net = linreg
# loss = squard_loss
# w = torch.normal(0, 1.0, size=(2,1), requires_grad=True)  #对于需要求偏导的，需要设置requires_grad = True,否则无法反向传播
# b = torch.zeros(1, requires_grad = True)
#
# for epoch in range(num_epoch):
#     for X,y in data_iter(batch_size,features,labels):
#         l = loss(net(X,w,b),y)
#         l.sum().backward()
#         sgd([w,b],lr,batch_size) #梯度更新
#       # 训练结束
#
#     # 每一次训练效果
#     with torch.no_grad():
#         train_l = loss(net(features,w,b),labels)
#         print("第 %d 轮训练，均方误差为 %.4f，训练参数为(%.3f, %.3f),%.3f" %(epoch, float(train_l), w[0],w[1], b))