import torch
import pandas as pd
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from sklearn.preprocessing import StandardScaler

# Select GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load the housing dataset and standardize both features and targets.
df = pd.read_csv('./data/housing.csv')
features = df[['RM', 'LSTAT', 'PTRATIO']].values
labels = df['MEDV'].values

# Separate scalers: the label scaler is reused later to invert predictions.
scaler_features = StandardScaler()
scaler_labels = StandardScaler()

features = scaler_features.fit_transform(features)
# StandardScaler expects 2-D input, so reshape targets to (n_samples, 1);
# the result already matches the model's (n, 1) output shape.
labels = scaler_labels.fit_transform(labels.reshape(-1, 1))

# Convert to float32 tensors.  Note: labels is already (n, 1) after the
# fit_transform above, so no extra .view(-1, 1) is needed.
features = torch.tensor(features, dtype=torch.float32)
labels = torch.tensor(labels, dtype=torch.float32)

# Mini-batch iterator over (features, labels) pairs.
dataset = TensorDataset(features, labels)
data_iter = DataLoader(dataset, batch_size=10, shuffle=True)

# Model: a single linear layer mapping the 3 features to one target value.
linear = nn.Linear(3, 1)
# Small-variance normal init for the weights, zero bias — identical to
# mutating .data directly, but via the torch.nn.init API.
nn.init.normal_(linear.weight, mean=0.0, std=0.01)
nn.init.zeros_(linear.bias)

# Move the model to the selected device (CPU or GPU).
net = nn.Sequential(linear).to(device)

# Mean-squared-error loss and plain SGD optimizer.
loss = nn.MSELoss()
trainer = torch.optim.SGD(net.parameters(), lr=0.001)

# Training loop: mini-batch SGD for a fixed number of epochs.
num_epoch = 500
for epoch in range(num_epoch):
    for x, y in data_iter:
        x, y = x.to(device), y.to(device)  # move the batch to the training device
        l = loss(net(x), y)
        trainer.zero_grad()  # clear gradients from the previous step
        l.backward()
        trainer.step()
    # Report the full-dataset loss every 100 epochs.  Computing it only when
    # it is actually printed, and under no_grad(), avoids building a
    # throwaway autograd graph over the whole dataset on every epoch.
    if epoch % 100 == 0:
        with torch.no_grad():
            l = loss(net(features.to(device)), labels.to(device))
        print(f"epoch {epoch + 1}, loss {l:f}")

# A raw (un-normalized) sample in feature order [RM, LSTAT, PTRATIO].
test_input = torch.tensor([[1.2, 2.0, 3.0]], dtype=torch.float32)

# Normalize with the scaler fitted on the training features
# (StandardScaler.transform expects a numpy array).
test_input_normalized = scaler_features.transform(test_input.numpy())

# Back to a tensor on the same device as the model.
test_tensor = torch.tensor(test_input_normalized, dtype=torch.float32).to(device)

# Inference only: no_grad() skips autograd bookkeeping entirely,
# which also removes the need for a later .detach().
with torch.no_grad():
    predicted_normalized = net(test_tensor)

# Undo the target standardization to get the price in original units.
predicted_price = scaler_labels.inverse_transform(predicted_normalized.cpu().numpy())

# Print the predicted price.
print(predicted_price)
