import torch as pt
from sklearn.preprocessing import StandardScaler
import numpy as np
import matplotlib.pyplot as plt

pt.manual_seed(777)
np.random.seed(777)
ALPHA = 0.001      # optimizer learning rate
N_EPOCHS = 2000    # number of training iterations

# Five samples: three exam-score features each, one regression target each.
x_data = [[73., 80., 75.], [93., 88., 93.],
          [89., 91., 90.], [96., 98., 100.], [73., 66., 70.]]
y_data = [[152.], [185.], [180.], [196.], [142.]]
# 1. Linear regression with PyTorch
# (1) Data preparation
# Standardize features and targets, then load them into tensors.
# BUG FIX: the original reused ONE StandardScaler for both x and y; after the
# second fit() the x statistics were overwritten, so x could never be
# inverse-transformed.  Using a separate scaler per array produces identical
# transformed values while keeping both sets of statistics recoverable.
x_scaler = StandardScaler()
y_scaler = StandardScaler()
x_data = x_scaler.fit_transform(x_data)
y_data = y_scaler.fit_transform(y_data)
scaler = y_scaler  # backward-compat alias: the original `scaler` ended up fit on y
xt = pt.tensor(x_data)  # float64 tensor, shape (5, 3)
yt = pt.tensor(y_data)  # float64 tensor, shape (5, 1)

# (2) Model definition
# One fully connected layer: 3 input features -> 1 predicted value.
# .double() matches the float64 tensors produced by StandardScaler.
model = pt.nn.Linear(3, 1, bias=True).double()

# (3) Mean-squared-error loss for regression.
loss = pt.nn.MSELoss()

# (4) Adam optimizer over the model's parameters, learning rate ALPHA.
# (The original comment said "stochastic gradient descent", but the code
# has always used Adam.)
optim = pt.optim.Adam(model.parameters(), lr=ALPHA)

# (2) Training
# Run N_EPOCHS full-batch gradient steps, recording each epoch's loss.
cost_history = np.zeros(N_EPOCHS)
for i in range(N_EPOCHS):

    # Reset gradients accumulated by the previous step.
    model.train(True)   # no-op for a bare Linear layer; kept for form
    optim.zero_grad()

    # Forward pass, loss, backward pass, parameter update.
    ht = model(xt)
    cost = loss(ht, yt)
    cost.backward()
    optim.step()
    model.train(False)
    # FIX: cost.item() instead of the deprecated cost.data.numpy() --
    # `.data` bypasses autograd's version tracking and is unsafe.
    cost_history[i] = cost.item()

    # Print the loss every 10 epochs (epochs #1, #11, #21, ...).
    if i % 10 == 0:
        print(f'#{i + 1}: cost = {cost.item()}')

# Also print the final epoch's loss if the loop above did not.
# FIX: test N_EPOCHS rather than the loop variable `i`, which would be
# undefined (NameError) if N_EPOCHS were 0.
if N_EPOCHS > 0 and (N_EPOCHS - 1) % 10 != 0:
    print(f'#{N_EPOCHS}: cost = {cost.item()}')

# (5) Report the model's predictions.
# NOTE: predictions are in standardized units; inverse-transform them if a
# comparison against the raw y_data values is needed.
print('打印预测结果（因特征缩放，预测值和y_data不一致，如有需要可以对预测值做反向变换使其一致。）')
model.eval()
ht = model(xt)
print(ht.detach().numpy())

# (6) Plot the loss curve recorded during training.
plt.title('Cost History')
plt.plot(cost_history)
plt.show()
