# 基于PyTorch的鲍鱼年龄线性回归

import pandas as pd

# Column names for the UCI abalone dataset (the raw .data file ships without a header row).
column_names = ['Sex', 'Length', 'Diameter', 'Height', 'Whole_weight', 'Shucked_weight', 'Viscera_weight',
                'Shell_weight', 'Rings']
# BUG FIX: the raw file has no header line. Reading it with the default
# header=0 consumed the first sample as column labels, and the subsequent
# column rename silently dropped that row. header=None + names= keeps every row.
data = pd.read_csv('./abalone/abalone.data', sep=',', header=None, names=column_names)
print(data.head(5))

# One-hot encode the categorical Sex column (M / F / I -> three indicator columns).
data = pd.get_dummies(data, columns=['Sex'])
x = data[['Length', 'Diameter', 'Height', 'Whole_weight', 'Shucked_weight',
          'Viscera_weight', 'Shell_weight', 'Sex_F', 'Sex_I', 'Sex_M']]
y = data['Rings']
'''
Attribute          Type        Unit  Description
------------------------------------------------
Sex                nominal           M (male), F (female), I (infant)
Length             continuous  mm    longest shell measurement
Diameter           continuous  mm    perpendicular to length
Height             continuous  mm    with meat in shell
Whole weight       continuous  g     whole abalone
Shucked weight     continuous  g     weight of meat
Viscera weight     continuous  g     gut weight after bleeding
Shell weight       continuous  g     after being dried
Rings              integer           +1.5 gives the age in years
'''
# Hold out 20% of the samples for evaluation; fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# Standardize features: fit statistics on the training split only, then apply
# the same transform to the test split (no leakage of test statistics).
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train_std = scaler.fit_transform(x_train)
x_test_std = scaler.transform(x_test)

import torch
import torch.nn as nn
import torch.optim as optim

# Wrap everything as float32 tensors; targets become (N, 1) column vectors so
# they line up with the model's output shape.
x_train_tensor = torch.tensor(x_train_std, dtype=torch.float32)
x_test_tensor = torch.tensor(x_test_std, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train.values, dtype=torch.float32).reshape(-1, 1)
y_test_tensor = torch.tensor(y_test.values, dtype=torch.float32).reshape(-1, 1)


# NOTE: torch / torch.nn / torch.optim were already imported above; the
# duplicate imports have been removed.
import matplotlib.pyplot as plt
# SummaryWriter streams scalars and model graphs to TensorBoard event files.
from tensorboardX import SummaryWriter

# All event files go under ./logs (view with: tensorboard --logdir logs).
writer = SummaryWriter(logdir="logs")

# Plain linear regression: a single fully-connected layer, n_features -> 1.
model = nn.Sequential(nn.Linear(x_train_tensor.shape[1], 1))
criterion = nn.MSELoss()  # mean squared error, the standard regression loss
optimizer = optim.Adam(model.parameters(), lr=0.1)

# Full-batch gradient descent: the whole training set fits in memory, so each
# epoch is one forward/backward pass over all samples.
# FIX: the loop variable previously shadowed the epoch-count name `epochs`.
num_epochs = 1000
for epoch in range(1, num_epochs + 1):
    output = model(x_train_tensor)
    loss = criterion(output, y_train_tensor)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Log the Python float (loss.item()), not the tensor, to TensorBoard;
    # tag typo fixed: "learing_rate" -> "learning_rate".
    writer.add_scalar("loss", loss.item(), epoch)
    writer.add_scalar("learning_rate", optimizer.param_groups[0]["lr"], epoch)
    print(f'训练集损失：{loss.item()}')
    print(f'迭代次数:{epoch}')

# Trace the model with a dummy batch sized to the actual feature count
# (the original hard-coded 10 and passed an unbatched vector).
writer.add_graph(model, torch.rand(1, x_train_tensor.shape[1]))
writer.close()
# View with: python -m tensorboard.main --logdir="./logs"


# Evaluate on the held-out split.
model.eval()  # switch any train-mode layers to inference behavior
with torch.no_grad():  # no computation graph needed for evaluation
    pre = model(x_test_tensor)                  # predictions on the standardized test set
    test_loss = criterion(pre, y_test_tensor)   # MSE against the true ring counts
    print(f'测试集损失：{test_loss}')



# Move predictions and targets back to NumPy for plotting.
pre_numpy = pre.detach().cpu().numpy()
y_test_numpy = y_test_tensor.detach().cpu().numpy()

# Figure 0: predicted vs. actual scatter; the diagonal marks perfect prediction.
plt.figure(0)
plt.scatter(y_test_numpy, pre_numpy, color='yellow')
lo, hi = y_test_numpy.min(), y_test_numpy.max()  # scalar endpoints for the diagonal
plt.plot([lo, hi], [lo, hi], color='blue', linewidth=2)
plt.xlabel('Actual Values')
plt.ylabel('Predicted Values')
plt.title('Regression results')

# Figure 1: actual and predicted series side by side, ordered by test index.
plt.figure(1)
sorted_indices = x_test.index.argsort()
y_test_sorted = y_test.iloc[sorted_indices]
y_pred_sorted = pd.Series(pre_numpy.squeeze()).iloc[sorted_indices]
plt.plot(y_test_sorted.values, label='Actual Values', marker='o')  # typo fix: was 'Acatual'
plt.plot(y_pred_sorted.values, label='Predicted Values', marker='*')
plt.xlabel('Sorted Index')
plt.ylabel('Values')
plt.title('Actual vs Predicted Values in Linear Regression')
plt.legend()  # BUG FIX: labels were set but never shown without an explicit legend

plt.show()

