import torch
import numpy as np

# Load the Boston housing dataset: whitespace-separated numeric columns,
# one sample per line. Use split() (not split(" ")) because the file is
# column-aligned with runs of spaces; split(" ") would produce empty
# strings and make astype(float) fail.
with open("housing.data") as f:
    data = np.array([line.split() for line in f]).astype(float)
print(data.shape)


X = data[:, 0:-1]  # every column except the last is a feature
Y = data[:, -1]    # the last column is the house price (target)

# Hold out the final samples as a test set; the first 496 are for training.
X_train = X[0:496, ...]
Y_train = Y[0:496, ...]
X_test = X[496:, ...]
Y_test = Y[496:, ...]


class Net(torch.nn.Module):
    """Two-layer fully connected regression network.

    Maps ``n_feature`` inputs through a 100-unit hidden layer with a
    ReLU activation (f(x) = max(0, x)) to ``n_output`` outputs.
    """

    def __init__(self, n_feature, n_output):
        super(Net, self).__init__()
        # Hidden layer: learns an (n_feature x 100) weight matrix and a
        # 100-dimensional bias vector.
        self.fc1 = torch.nn.Linear(n_feature, 100)
        # Output layer: learns a (100 x n_output) weight matrix and an
        # n_output-dimensional bias vector.
        self.fc2 = torch.nn.Linear(100, n_output)

    def forward(self, x):
        # fc1 -> ReLU -> fc2, expressed as a single pipeline.
        hidden = torch.relu(self.fc1(x))
        return self.fc2(hidden)


# Build the model: 13 input features -> 100 hidden units (ReLU) -> 1 output.
net = Net(n_feature=13, n_output=1)
loss_func = torch.nn.MSELoss()  # mean squared error
#optimizer = torch.optim.SGD(net.parameters(), lr=0.0001)  # plain SGD alternative
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)  # adaptive learning rate

# Convert the training data to tensors ONCE, outside the loop — the data
# never changes, so rebuilding these 10000 times was pure waste.
x_data = torch.tensor(X_train, dtype=torch.float)
y_data = torch.tensor(Y_train, dtype=torch.float)

for i in range(10000):
    # Call the module, not net.forward(...), so Module.__call__ runs
    # (hooks, etc.) as PyTorch intends.
    prediction = net(x_data)
    prediction = torch.squeeze(prediction)  # (N, 1) -> (N,) to match y_data
    loss = loss_func(prediction, y_data) * 0.001  # scale the loss down

    optimizer.zero_grad()  # clear accumulated gradients
    loss.backward()        # backpropagate
    optimizer.step()       # update parameters from the gradients

torch.save(net.state_dict(), "housing.model")