#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : 2.自动求导与梯度计算.py
# @Author: Richard Chiming Xu
# @Date  : 2021/11/5
# @Desc  : 自动求导与梯度计算

import numpy as np
import torch
from torch.autograd import Variable

# Synthetic data for linear regression
def gen_data():
    """Generate 1000 (x, y) samples from the line y = 10*x + 4.

    x is 1000 evenly spaced points on [0, 10], all shifted by one random
    offset drawn from U(0, 1). Returns two float32 arrays of shape (1000, 1).
    """
    def target_fn(v):
        return 10 * v + 4

    # Single random offset applied to every x (same call as the original
    # so the RNG stream is consumed identically).
    offset = np.random.uniform(0, 1, size=1)[0]
    x = np.linspace(0, 10, 1000).astype(np.float32) + offset
    y = target_fn(x).astype(np.float32)

    # Column vectors: one feature per sample.
    return x.reshape(1000, 1), y.reshape(1000, 1)

# Convert arrays to tensors and initialize parameters
def trans_tensor(x, y):
    """Convert numpy training data to tensors and create the parameters.

    Args:
        x: float32 numpy array of inputs, shape (N, 1).
        y: float32 numpy array of targets, shape (N, 1).

    Returns:
        (x_train, y_train, w, b): input/target tensors plus a randomly
        initialized weight ``w`` and zero bias ``b``, both with
        ``requires_grad=True`` so autograd tracks them.

    Note: ``torch.autograd.Variable`` is deprecated since PyTorch 0.4 —
    tensors support autograd directly, so we build them without Variable.
    """
    # Shares memory with the numpy arrays; no copy is made.
    x_train = torch.from_numpy(x)
    y_train = torch.from_numpy(y)

    # Trainable parameters of the linear model y = w*x + b.
    w = torch.randn(1, requires_grad=True)
    b = torch.zeros(1, requires_grad=True)

    return x_train, y_train, w, b

# Linear model
def linear_model(X, w, b):
    """Return the affine prediction ``X * w + b`` (element-wise)."""
    weighted = X * w
    return weighted + b

# Mean squared error loss
def get_loss(y_predict, y):
    """Return the mean squared error between predictions and targets."""
    squared_error = (y_predict - y) ** 2
    return squared_error.mean()

x, y = gen_data()
x_train, y_train, w, b = trans_tensor(x, y)

'''
    Run 10 epochs of full-batch gradient descent
'''
for epoch in range(10):
    y_predict = linear_model(x_train, w, b)
    loss = get_loss(y_predict, y_train)

    # Backpropagate to populate w.grad and b.grad.
    loss.backward()

    # Update parameters in place without autograd tracking.
    # Mutating `.data` (the old idiom) is deprecated and bypasses
    # autograd's version checks; torch.no_grad() is the supported way.
    with torch.no_grad():
        w -= 1e-2 * w.grad
        b -= 1e-2 * b.grad
    print('epoch: {}, loss: {}'.format(epoch, loss.data))

    # Zero the gradients: backward() accumulates into .grad otherwise,
    # which would corrupt the next epoch's update.
    w.grad.zero_()
    b.grad.zero_()
