import os

import torch

from load_data import load_data, split_train_test

# Load the housing dataset and hold out a test split, then report the
# resulting array shapes.
X, Y = load_data("./housing.data")
X_train, X_test, Y_train, Y_test = split_train_test(X, Y)
for label, shape in (
    ("X_train.shape: ", X_train.shape),
    ("X_test.shape: ", X_test.shape),
    ("Y_train.shape: ", Y_train.shape),
    ("Y_test.shape: ", Y_test.shape),
):
    print(label, shape)


# net
# net
class Net(torch.nn.Module):
    """Feed-forward regressor: one ReLU hidden layer plus a linear output.

    Args:
        n_feature: number of input features per sample.
        n_out: number of output values (1 for scalar regression).
        n_hidden: width of the hidden layer (default 100, matching the
            original hard-coded value, so existing callers are unaffected).
    """

    def __init__(self, n_feature, n_out, n_hidden=100):
        super().__init__()

        # Attribute names `hidden`/`predict` are kept stable so that
        # saved checkpoints / state dicts remain loadable.
        self.hidden = torch.nn.Linear(n_feature, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_out)

    def forward(self, x):
        """Map a (batch, n_feature) tensor to (batch, n_out) predictions."""
        out = self.hidden(x)
        out = torch.relu(out)

        out = self.predict(out)

        return out


# Each sample has 13 features; the single output is the house price.
net = Net(13, 1)

# loss: mean squared error for regression
loss_func = torch.nn.MSELoss()

# optimizer: Adam (an SGD run at lr=0.001 was tried previously)
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)

# Convert the numpy splits to float32 tensors for training.
x_train, y_train, x_test, y_test = (
    torch.tensor(arr, dtype=torch.float32)
    for arr in (X_train, Y_train, X_test, Y_test)
)

# Training loop: one full-batch gradient step per iteration, followed by an
# evaluation pass on the held-out test set.
for i in range(1000):
    print(f">>>>>>>> 第 {i} 次 迭代 >>>>>>>>")

    print("------- 训练集，训练")
    # Call the module itself rather than .forward() so that any registered
    # hooks run.
    pred = net(x_train)
    print("squeeze前，pred.shape: ", pred.shape)
    # Drop only the trailing output dim: (N, 1) -> (N,). Squeezing a specific
    # dim is safe even if the batch size ever happens to be 1, whereas a bare
    # squeeze() would also collapse the batch dimension.
    pred = torch.squeeze(pred, dim=-1)
    # NOTE: the 0.001 factor only rescales the reported loss / gradients;
    # kept for parity with the original training setup.
    loss = loss_func(pred, y_train) * 0.001

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    print("ite: {}, loss_train: {}".format(i, loss))
    print(pred[:10])
    print(y_train[:10])

    # test: evaluation only — no autograd graph needed, so wrap in no_grad()
    # to save memory and time.
    print("------- 测试集，预测")
    with torch.no_grad():
        pred = net(x_test)
        pred = torch.squeeze(pred, dim=-1)
        loss_test = loss_func(pred, y_test) * 0.001
    print("ite:{}, loss_test:{}".format(i, loss_test))
    print(f"<<<<<< 第 {i} 次 迭代结束 <<<<<<<")

# Save the whole model object (architecture + weights). torch.save pickles
# the Net class by reference, so loading requires this module importable.
# Create the target directory first — torch.save raises FileNotFoundError
# if "model/" does not exist.
os.makedirs("model", exist_ok=True)
torch.save(net, "model/model_reg.pkl")
# To restore: net = torch.load("model/model_reg.pkl")

# Alternative (more portable): save only the parameters.
# torch.save(net.state_dict(), "params.pkl")
# net.load_state_dict(torch.load("params.pkl"))
