import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
from sklearn.model_selection import train_test_split  # train/test split helper (not cross-validation; unused in the code below)

# Load the raw data table and split it into features (x1..x4) and target Y.
pd_data = pd.read_csv('../train.csv')
X = pd_data.loc[:, ('x1', 'x2', 'x3', 'x4')]
y = pd_data.loc[:, 'Y']

# BUG FIX: the original converted X/y to plain Python lists with tolist() and
# then called item.detach().numpy() on each element — plain lists have no
# .detach(), so that raised AttributeError. Convert directly via NumPy instead.
# NOTE: the data itself must NOT carry requires_grad — only the parameters
# (w, b) are optimized; gradients on inputs were useless and forced extra
# .detach() calls when converting back to NumPy for plotting.
X = torch.tensor(np.asarray(X), dtype=torch.float32)                  # shape (N, 4)
# Reshape the target to a column vector so (y - y_predict) with a (N, 1)
# prediction subtracts elementwise instead of broadcasting to (N, N).
y = torch.tensor(np.asarray(y), dtype=torch.float32).reshape(-1, 1)   # shape (N, 1)

lr = 0.01  # learning rate for the manual SGD update

# 2. Model parameters for y_hat = X @ w + b (4 features -> 1 output).
w = torch.rand([4, 1], requires_grad=True)
b = torch.tensor(0, requires_grad=True, dtype=torch.float32)

# 4. Gradient-descent loop: forward pass, backprop, manual parameter update.
for i in range(2000):
    # 3. Forward pass and MSE loss (recomputed after every parameter update).
    y_predict = torch.matmul(X, w) + b  # shape (N, 1)
    # BUG FIX: force y to a column vector before subtracting. With y of shape
    # (N,) and y_predict of shape (N, 1), (y - y_predict) broadcasts to an
    # (N, N) matrix and the loss is silently wrong. reshape(-1, 1) is a no-op
    # when y is already (N, 1), so this is safe either way.
    loss = (y.reshape(-1, 1) - y_predict).pow(2).mean()

    # Zero stale gradients before backward — autograd ACCUMULATES into .grad,
    # and plain SGD must not mix in gradients from previous iterations.
    if w.grad is not None:
        w.grad.data.zero_()  # trailing _ means in-place
    if b.grad is not None:
        b.grad.data.zero_()

    loss.backward()  # backpropagation
    # Update raw .data so the step itself is not tracked by autograd.
    w.data = w.data - w.grad * lr
    b.data = b.data - b.grad * lr
    if i % 100 == 0:
        # BUG FIX: w holds 4 elements, so w.item() raises RuntimeError
        # (.item() only works on single-element tensors); print the
        # flattened weight vector instead.
        print('w=%s, b=%s, loss=%s'
              % (w.detach().numpy().ravel(), b.item(), loss.item()))

# 5. Plot the fit. BUG FIX: the original did X.numpy() on a requires_grad
# tensor (raises RuntimeError — needs .detach()) and scattered X flattened to
# 4N values against N targets (size mismatch). With 4 features there is no
# single x-axis to plot against, so show actual vs. predicted instead:
# points near the red y = x line indicate a good fit.
plt.figure(figsize=(20, 8))
y_predict = (torch.matmul(X, w) + b).detach().numpy().reshape(-1)
y_actual = y.detach().numpy().reshape(-1)
plt.scatter(y_actual, y_predict)
lo, hi = y_actual.min(), y_actual.max()
plt.plot([lo, hi], [lo, hi], c='r')  # perfect-fit reference line
plt.xlabel('actual Y')
plt.ylabel('predicted Y')
plt.show()
