import torch
import numpy as np
import pandas as pd
import re
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split

# data: Boston housing file — whitespace-separated numeric columns,
# 13 feature columns + 1 target column per row.
# FIX: the original used open(...).readlines() and never closed the file;
# use a context manager so the handle is released deterministically.
data = []
with open("./boston.data") as f:
    for item in f:
        # collapse runs of whitespace to a single space, then split into fields
        out = re.sub(r"\s+", " ", item.strip())
        data.append(out.split(" "))
data = np.array(data, dtype=np.float32)
print(data.shape)  # (506, 14)

Y = data[:, -1]    # target: last column (median home value)
X = data[:, 0:-1]  # features: first 13 columns

# simple holdout split: first 496 rows for training, last 10 for testing
X_train = X[0:496]
Y_train = Y[0:496]
X_test = X[496:, ...]
Y_test = Y[496:, ...]

print(X_train.shape)
print(Y_train.shape)
print(X_test.shape)
print(Y_test.shape)

# net
class Net(torch.nn.Module):
    """Linear regression model: a single fully-connected layer.

    Maps ``n_feature`` inputs to ``n_output`` outputs with no hidden
    layer and no activation (plain linear regression).
    """

    def __init__(self, n_feature, n_output):
        super(Net, self).__init__()
        self.predict = torch.nn.Linear(n_feature, n_output)

    def forward(self, x):
        """Apply the linear layer to ``x`` and return the prediction."""
        return self.predict(x)

# model: 13 input features -> 1 predicted value
net = Net(13, 1)

# loss: mean squared error (standard for regression)
loss_func = torch.nn.MSELoss()

# optimizer: plain SGD with a small learning rate
optimiter = torch.optim.SGD(net.parameters(), lr=0.0001)

# The training tensors are constant, so build them once outside the loop
# instead of re-converting the numpy arrays on every iteration.
x_data = torch.tensor(X_train, dtype=torch.float32)  # samples, shape (496, 13)
y_data = torch.tensor(Y_train, dtype=torch.float32)  # labels, shape (496,)

# training loop
for i in range(10000):
    pred = net(x_data)  # shape (496, 1); call the module, not .forward()

    # BUG FIX: the original called torch.squeeze(pred) and discarded the
    # result, so MSELoss received pred of shape (496, 1) against a target
    # of shape (496,), broadcast them to (496, 496), and computed the
    # wrong loss. Squeezing the prediction makes the shapes match
    # element-wise. The 0.001 factor from the original is kept; it scales
    # the gradients (an effective learning-rate tweak).
    loss = loss_func(pred.squeeze(dim=1), y_data) * 0.001

    optimiter.zero_grad()
    loss.backward()
    optimiter.step()

    # log progress periodically instead of flooding stdout every step
    if i % 1000 == 0:
        print("ite:{}, loss:{}".format(i, loss))
        print(pred[0:10])
        print(y_data[0:10])


# test the model