import torch
import copy
# x = torch.tensor(1.0, requires_grad=True)
# y1 = x ** 2
# y1.backward()
# print('第一次求导：',x.grad)
# y2=x**2
# y2.backward()
# print('第二次求导：',x.grad)
#
# # y4=x**2
# # y4.backward()
# # print(x.grad)


#y=10x**3+8x**2+2x
#y=100x=200

#正常反向传播
# x = torch.tensor(2.0, requires_grad=False)
# w = torch.tensor([1.], requires_grad=True)
# zero=torch.zeros(1)
# for i in range(3):
#     print('第次',i+1)
#     y = torch.sum(x*w)
#     print('  w:',w.data)
#     print('  yp:',y.data)
#     loss=(200-y)**2
#     # loss=loss/3.0
#     print('  loss:',loss.data)
#     loss.backward()
#     print('  w.grad:',w.grad)
#     w.data=w.data-0.1*w.grad.data
#     w.grad.data = torch.zeros(1)#注意这里不同于写w.grad.data =zero，详情请参考深浅拷贝
#     print('  w:',w.data)
# print('3次迭代后预测：',torch.sum(x*w))


# x= [1.0, 2.0, 3.0]
# y = [2.0, 4.0, 6.0]
# w = torch.tensor([1.], requires_grad=True)
# i=0
# for x,y in zip(x,y):
#     print('第次',i+1)
#     y = torch.sum(x*w)
#     print('  w:',w.data)
#     print('  yp:',y.data)
#     loss=(200-y)**2
#     # loss=loss/3.0
#     print('  loss:',loss.data)
#     loss.backward()
#     print('  w.grad:',w.grad)
#     w.data=w.data-0.1*w.grad.data
#     w.grad.data = torch.zeros(1)#注意这里不同于写w.grad.data =zero，详情请参考深浅拷贝
#     print('  w:',w.data)
#     i=i+1
# print('3次迭代后预测：',torch.sum(x*w))


# 梯度累加
# x = torch.tensor(2.0, requires_grad=False)
# w = torch.tensor([1.], requires_grad=True)
# zero=torch.zeros(1)
# for i in range(3):
#     print('第次',i+1,'\n','  w:',w.data)
#     y = torch.sum(x*w)
#     print('  yp:',y.data)
#     loss=(200-y)**2
#     # loss=loss/3.0
#     print('  loss:',loss.data)
#     loss.backward()
#     print('  w.grad:',w.grad)
# w.data=w.data-0.1*w.grad.data
# print('更新权重','\n','  w.data:',w.data,'\n','  w.grad:',w.grad)
# w.grad.data = torch.zeros(1)
# print('3次迭代后预测：',torch.sum(x*w))

# x= [1.0, 2.0, 3.0]
# y = [2.0, 4.0, 6.0]
# w = torch.tensor([1.], requires_grad=True)
# i=0
# for x,y in zip(x,y):
#     print('第次',i+1,'\n','  w:',w.data)
#     y = torch.sum(x*w)
#     print('  yp:',y.data)
#     loss=(200-y)**2
#     # loss=loss/3.0
#     print('  loss:',loss.data)
#     loss.backward()
#     print('  w.grad:',w.grad)
#     i = i + 1
# w.data=w.data-0.1*w.grad.data
# print('更新权重','\n','  w.data:',w.data,'\n','  w.grad:',w.grad)
# w.grad.data = torch.zeros(1)#注意这里不同于写w.grad.data =zero，详情请参考深浅拷贝
# print('3次迭代后预测：',torch.sum(x*w))

#loss累加
# loss_sum=0.0
# for i in range(3):
#     print('第次',i+1)
#     y = torch.sum(x*w)
#     print('  loss_sum:',loss_sum,'\n','  w:',w.data,'\n','  yp:',y.data)
#     loss=(200-y)**2
#     loss_sum=loss_sum+loss
#     # loss=loss/3.0
#     print('   loss:',loss.data,'\n','  loss_sum',loss_sum.data)
# loss_sum.backward()
# print('更新权重','\n','  w.data:',w.data,'\n','  w.grad:',w.grad)
# w.data=w.data-0.1*w.grad.data
# # w.grad.data = zero
# print('   w:',w.data)
# print('3次迭代后预测：',torch.sum(x*w))

# Gradient-accumulation demo over a 3-sample dataset: the per-sample losses
# are summed into a single graph, backward() is called once on the total,
# and one weight update is taken (contrast with the per-sample-update loops
# commented out above).
#
# NOTE(review): the target values in the y list are never used — the loop
# variable y is immediately overwritten by the prediction, and the loss is
# measured against the hardcoded constant 200, matching the earlier
# "y=100x=200" experiments above. Confirm that is intended.
x = [1.0, 2.0, 3.0]
y = [2.0, 4.0, 6.0]
w = torch.tensor([1.], requires_grad=True)
i = 0
loss_sum = 0.0
# Bug fix: the original loop never incremented i, so every round printed as
# round 1; enumerate(..., start=1) numbers the rounds correctly.
for i, (x, y) in enumerate(zip(x, y), start=1):
    print('第次', i, '\n', '  w:', w.data)
    y = torch.sum(x * w)  # forward pass; rebinding y shadows the target value
    print('  yp:', y.data)
    loss = (200 - y) ** 2
    loss_sum = loss_sum + loss  # accumulate loss tensors (keeps the graph alive)
    # loss = loss / 3.0  # optional: average instead of sum
    print('   loss:', loss.data, '\n', '  loss_sum', loss_sum.data)
loss_sum.backward()  # one backward pass through the summed loss
print('更新权重', '\n', '  w.data:', w.data, '\n', '  w.grad:', w.grad)
w.data = w.data - 0.1 * w.grad.data  # single SGD step, lr = 0.1
# Replacing .grad's data with a fresh zero tensor clears the accumulated
# gradient (w.grad.data.zero_() would do the same in place — see the
# 深浅拷贝 note in the commented variants above).
w.grad.data = torch.zeros(1)
print('   w:', w.data)
# x still holds the last sample (3.0) after the loop, so this predicts for x=3.
print('3次迭代后预测：', torch.sum(x * w))

# NOTE(review): everything below is an older experiment that has been disabled
# by wrapping it in module-level triple-quoted string literals (a '''-comment
# trick). The strings are evaluated and discarded at import time; none of this
# code runs. Because the text is split across three separate string literals
# (the function's own docstring at L150-155 sits between them), un-commenting
# requires removing all the stray ''' delimiters, not just the outer pair.
'''
import torch
import matplotlib.pyplot as plt

# xdata = [1.0, 2.0, 3.0]
# y_data = [2.0, 4.0, 6.0]

x_data = [1.0]
y_data = [2.0]

w = torch.tensor([1.0])  # 假设 w = 1.0的情况
w.requires_grad = True

def forward(x):  # y^ = wx
    return x * w  # w是tensor 所以 这个乘法自动转换为tensor数乘 , x被转化成tensor 这里构建了一个计算图
def loss(x, y):  # 计算单个的误差 : 损失'''
'''
    每调用一次loss函数,计算图自动构建一次
    :param x:
    :param y:
    :return:
    '''
'''
    y_pred = forward(x)
    # return (y_pred - y) ** 2
    return (y_pred - y)

eli = []
lli = []
print('predict (before training)', 4, forward(4).item())
for epoch in range(10):  # 每轮输出 w的值和损失 loss
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        l.backward()  # 自动求梯度
        print('\tgrad:', x, y, w.grad.item())
        w.data = w.data - 0.01 * w.grad.data    # 权重的数值更新,纯数值的修改 如果不用.data会新建计算图
        # 如果这里想求平均值 其中的累加操作 要写成sum += l.item()
        w.grad.data.zero_()     # 清空权重里梯度的数据,不然梯度会累加
print(w.data)
# eli.append(epoch)
# lli.append(l.item())
# print('progress:', epoch, l.item())
# print('Predict (after training)', 4, forward(4).item())
'''