# from sklearn.datasets import load_boston  -- keep this commented out (load_boston was removed from scikit-learn) and run GEN_1_regression_pytorch_downloads.py once to fetch the data locally
from sklearn.model_selection import train_test_split
import numpy  as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import pandas as pd

# boston = load_boston()
# X,y   = (boston.data, boston.target)
# # boston.data[:2]
# inputs = X.shape[1]
# bos = pd.DataFrame(boston.data)
# print(bos.head())
# Before this change, load_boston could not be called directly (the dataset was removed from scikit-learn for ethical reasons)

# The snippet below shows how to download and cache the dataset yourself; uncomment and run it once if boston_housing.npz does not exist yet
# import pandas as pd
# import numpy as np
# # 正确的 URL 地址
# data_url = "http://lib.stat.cmu.edu/datasets/boston"
# # 读取数据
# raw_df = pd.read_csv(data_url, sep="\s+", skiprows=22, header=None)
# # 处理数据
# data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
# target = raw_df.values[1::2, 2]
# # 将数据保存到本地文件
# np.savez("boston_housing.npz", data=data, target=target)


# Load the locally cached Boston housing dataset (produced once by the
# commented-out download snippet above).
dataset = np.load("boston_housing.npz")
X = dataset['data']    # feature matrix, one row per house
y = dataset['target']  # regression target (median house value)

# Wrap the features in a DataFrame and show the first rows as a sanity check.
bos = pd.DataFrame(X)
print(bos.head())

# Number of input features; used later to size the first linear layer.
inputs = X.shape[1]
print("特征数量:", inputs)



# Hold out 20% of the samples for evaluation; fixed seed keeps the split
# reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0
)
num_train = X_train.shape[0]  # number of training samples


# Work in float64 throughout so the numpy arrays convert without casting.
torch.set_default_dtype(torch.float64)

# Fully-connected regressor: three hidden layers of width 50.
# (nn.Linear has bias=True by default.)
net = nn.Sequential(
    nn.Linear(inputs, 50),
    nn.ReLU(),
    nn.Linear(50, 50),
    nn.ReLU(),
    nn.Linear(50, 50),
    nn.Sigmoid(),
    nn.Linear(50, 1),
)

loss_fn = nn.MSELoss()                                    # mean squared error
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)  # Adam with default betas
num_epochs = 8000                                         # full-batch training iterations


# Convert the numpy splits to torch tensors. Targets are reshaped to column
# vectors so they match the network's (N, 1) output; .clone() detaches the
# tensors from the shared numpy storage.
x_train_t = torch.from_numpy(X_train).clone()
y_train_t = torch.from_numpy(y_train).clone().reshape(-1, 1)
x_test_t = torch.from_numpy(X_test).clone()
y_test_t = torch.from_numpy(y_test).clone().reshape(-1, 1)

history = []  # per-epoch training loss


# Full-batch training loop: forward pass, backward pass, parameter update,
# then an evaluation pass on the held-out set.
for i in range(num_epochs):
    y_pred = net(x_train_t)
    loss = loss_fn(y_pred, y_train_t)
    # Record a plain Python float. The old `loss.data` is deprecated autograd
    # API and kept one live tensor per epoch in `history`.
    history.append(loss.item())
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    # Evaluate without building an autograd graph — no gradients needed here.
    with torch.no_grad():
        test_loss = loss_fn(net(x_test_t), y_test_t)
    if i > 0 and i % 100 == 0:
        print(f'Epoch {i}, loss = {loss:.3f}, test loss {test_loss:.3f}')


# import pandas as pd
# import numpy as np
# import torch
# import torch.nn as nn
# from sklearn.model_selection import train_test_split
#
# # 下载并保存数据
# data_url = "http://lib.stat.cmu.edu/datasets/boston"
# raw_df = pd.read_csv(data_url, sep="\s+", skiprows=22, header=None)
# data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
# target = raw_df.values[1::2, 2]
# np.savez("boston_housing.npz", data=data, target=target)
#
# # 加载数据
# data = np.load("boston_housing.npz")
# X = data['data']
# y = data['target']
#
# # 数据预处理
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
#
# # 转换为 PyTorch 张量
# X_train = torch.tensor(X_train, dtype=torch.float64)
# X_test = torch.tensor(X_test, dtype=torch.float64)
# y_train = torch.tensor(y_train, dtype=torch.float64).reshape(-1, 1)
# y_test = torch.tensor(y_test, dtype=torch.float64).reshape(-1, 1)
#
# # 定义模型
# torch.set_default_dtype(torch.float64)
# net = nn.Sequential(
#     nn.Linear(X.shape[1], 50), nn.ReLU(),
#     nn.Linear(50, 1)
# )
#
# # 定义损失函数和优化器
# loss_fn = nn.MSELoss()
# optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
#
# # 训练模型
# num_epochs = 8000
# for epoch in range(num_epochs):
#     optimizer.zero_grad()
#     y_pred = net(X_train)
#     loss = loss_fn(y_pred, y_train)
#     loss.backward()
#     optimizer.step()
#
#     if epoch % 100 == 0:
#         print(f"Epoch {epoch}, Loss: {loss.item():.4f}")