import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np

# Automatic differentiation (autograd) demo
# x = Variable(torch.FloatTensor([2,3]),requires_grad = True)
# k = Variable(torch.zeros(2))
# k[0] = x[0]**2 + 3*x[1]
# k[1] = x[1]**2 + 2*x[0]

# j = torch.zeros(2,2)
# k.backward(torch.FloatTensor([1,1]),retain_graph = True)
# j = x.grad.data

# x.grad.data.zero_()

# k.backward(torch.FloatTensor([0,1]))
# j[1] = x.grad.data
# print(j)

# # Polynomial regression fitted by gradient descent
# w_target = np.array([0.5,3,2.4])
# b_target = np.array([0.9])

# f_des = 'y = {:.2f} + {:.2f}*x + {:.2f}*x^2 + {:.2f}*x^3'.format(b_target[0],w_target[0],w_target[1],w_target[2])

# x_sample = np.arange(-3,3.1,0.1)
# y_sample = b_target[0] + w_target[0]*x_sample + w_target[1]*x_sample**2 + w_target[2]*x_sample**3

# # plt.plot(x_sample,y_sample,label = 'real curve')
# # plt.legend()
# # plt.show()


# x_train = np.stack([x_sample**i for i in range(1,4)],axis=1)
# x_train = torch.from_numpy(x_train).float()
# y_train = torch.from_numpy(y_sample).float().unsqueeze(1)

# w = Variable(torch.randn(3,1),requires_grad = True)
# b = Variable(torch.zeros(1),requires_grad = True)

# x = Variable(x_train)
# y = Variable(y_train)

# def multi_linear(x):
#     return torch.mm(x,w) + b
# def get_loss(y,y_):
#     return torch.mean((y_ - y_train)**2)

# for e in range(100):
#     y_pred = multi_linear(x_train)
#     loss = get_loss(y_train,y_pred)
#     loss.backward()
#     # Must update through w.data, not w, otherwise a new graph node is created
#     w.data = w.data - 0.001*w.grad.data
#     b.data = b.data - 0.001*b.grad.data
#     if (e+1)%20 == 0:
#         print('epoch {},Loss:{:.5f}'.format(e+1,loss.data))
#     w.grad.data.zero_()
#     b.grad.data.zero_()

# y_pred = multi_linear(x_train)

# plt.plot(x_train.data.numpy()[:,0],y_pred.data.numpy(),label = 'fitting curve',color = 'r')
# plt.plot(x.data.numpy()[:,0],y_sample,label = 'real curve',color = 'b')
# plt.legend()
# plt.show()

# Underfitting demo: fit cubic data with a quadratic model.
# Ground-truth generating polynomial: y = 0.9 + 0.5*x + 3*x^2 + 2.4*x^3
w_target = np.array([0.5,3,2.4])
b_target = np.array([0.9])
# 61 sample points covering [-3, 3] with step 0.1.
x_sample = np.arange(-3,3.1,0.1)
y_sample = b_target[0] + w_target[0]*x_sample + w_target[1]*x_sample**2 + w_target[2]*x_sample**3

def follow(x, w=None, b=None):
    """Affine model: ``x @ w + b``.

    Args:
        x: (N, F) float tensor of input features.
        w: (F, 1) weight tensor; defaults to the module-level ``w1``.
        b: (1,) bias tensor; defaults to the module-level ``b1``.

    Returns:
        (N, 1) tensor of predictions.
    """
    # Fall back to the module-level trained parameters so existing
    # call sites (follow(x1)) keep working unchanged.
    if w is None:
        w = w1
    if b is None:
        b = b1
    return torch.mm(x, w) + b

def loss(y, y_):
    """Mean-squared error between targets ``y`` and predictions ``y_``."""
    residual = y_ - y
    return residual.pow(2).mean()

# Design matrix with columns [x, x^2] — quadratic features only.
x_train = np.stack([x_sample**i for i in range(1,3)],axis=1)
x1 = torch.from_numpy(x_train).float()
# Tensors broadcast: operations on tensors of different shapes auto-expand them.
# y1 is originally shape [61] while y_pred1 is [61, 1]; subtracting them would
# broadcast into a [61, 61] tensor, so unsqueeze y1 to [61, 1] first.
y1 = torch.from_numpy(y_sample).float().unsqueeze(1)
# Trainable parameters: random (2, 1) weights and a zero bias.
w1 = Variable(torch.randn(2,1),requires_grad = True)
b1 = Variable(torch.zeros(1),requires_grad = True)

# Plain gradient descent: 1000 steps with learning rate 1e-3.
for step in range(1000):
    prediction = follow(x1)
    step_loss = loss(y1, prediction)
    step_loss.backward()
    # Update through .data so the parameter update itself is not
    # recorded in the autograd graph.
    w1.data -= 0.001 * w1.grad.data
    b1.data -= 0.001 * b1.grad.data
    # Report progress every 20 steps.
    if (step + 1) % 20 == 0:
        print('count:{},loss:{}'.format(step + 1, step_loss.data))
    # Gradients accumulate across backward() calls; clear them
    # before the next iteration.
    w1.grad.data.zero_()
    b1.grad.data.zero_()

# Final predictions from the trained quadratic model.
y_pred1 = follow(x1)

# Plot the true cubic curve against the quadratic fit.
plt.plot(x_train[:,0], y_sample, label = 'real curve', color = 'r')
plt.plot(x_train[:,0], y_pred1.data, label = 'predict curve', color = 'b')
# Bug fix: labels were passed above but the legend was never drawn.
plt.legend()
plt.grid()
plt.show()

