"""
@Author  : 吕申凯
@Time    : 2022/9/19 22:07
@File    : 监控实际参数.py
@Function: 
"""

from sklearn.datasets import load_boston
import torch.nn as nn
import torch
import torch.utils.data as Data
from torch.utils.tensorboard import SummaryWriter


class Boston(nn.Module):
    """Boston house-price regression network.

    A small MLP: three fully-connected hidden layers with ReLU
    activations, followed by a single-output regression head.

    Args:
        input_dim: number of input features (13 for the Boston dataset).
    """

    def __init__(self, input_dim):
        super(Boston, self).__init__()
        # Fix: the original stacked three nn.Linear layers with no
        # activation in between, which collapses to a single linear map.
        # ReLU activations make the hidden layers genuinely nonlinear.
        self.fc = nn.Sequential(
            nn.Linear(input_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
            nn.Linear(128, 32),
            nn.ReLU(),
        )
        self.regression = nn.Linear(32, 1)

    def forward(self, x):
        """Run a forward pass.

        :param x: float tensor of shape (batch, input_dim)
        :return: predicted price, tensor of shape (batch, 1)
        """
        out = self.fc(x)
        out = self.regression(out)
        return out


batch_size = 50
# ============================ step 1/5 data ============================
# Load the Boston house-price dataset as (features, targets).
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2; pin scikit-learn < 1.2 or migrate to another dataset.
X, y = load_boston(return_X_y=True)

# Split 90/10 into train/test and convert to float32 tensors.
# (Split index computed once instead of four times.)
split = int(len(X) * 0.9)
train_X = torch.tensor(X[:split], dtype=torch.float32)
test_X = torch.tensor(X[split:], dtype=torch.float32)
train_Y = torch.tensor(y[:split], dtype=torch.float32)
test_Y = torch.tensor(y[split:], dtype=torch.float32)

# Wrap the tensors into TensorDataset objects.
# (Removed unused debug leftover: a = test_dataset[1].)
train_dataset = Data.TensorDataset(train_X, train_Y)
test_dataset = Data.TensorDataset(test_X, test_Y)

# Mini-batch loaders.
train_loader = Data.DataLoader(
    dataset=train_dataset,   # data source
    batch_size=batch_size,   # mini-batch size
    shuffle=True,            # reshuffle every epoch
    # num_workers=2          # multi-process data loading
)
# The test set is evaluated in one full-size batch.
test_loader = Data.DataLoader(
    dataset=test_dataset,
    batch_size=len(test_dataset),
)

# ============================ step 2/5 model ============================
# Instantiate the network; the Boston dataset has 13 input features.
boston = Boston(input_dim=13)

# ============================ step 3/5 loss ============================
# Mean-squared-error loss — the standard choice for regression.
loss_func = nn.MSELoss()

# ============================ step 4/5 optimizer ============================
# Adam over all network parameters with a small base learning rate.
optimizer = torch.optim.Adam(boston.parameters(), lr=0.0001)
# Learning-rate schedule: multiply the LR by 0.1 every 10 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# ============================ step 5/5 training ============================

# Global mini-batch counter across all epochs (first batch logs at step 0).
iter_count = -1

# Build the SummaryWriter; event files go under runs/ by default.
writer = SummaryWriter(comment='boston', filename_suffix="boston")

for epoch in range(25):
    '''train'''

    # Per-epoch sample index for the predicted-vs-real charts.
    event_count = 0
    boston.train()
    for step, (batch_x, batch_y) in enumerate(train_loader):
        iter_count += 1
        # Forward pass.
        output = boston(batch_x)
        # MSE between flattened predictions and targets.
        loss = loss_func(output.flatten(), batch_y.flatten())
        # Standard update: clear grads, backprop, step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Log the per-batch training loss.
        writer.add_scalar("Loss", loss.item(), iter_count)
        # Log predicted vs. real value per sample, one chart per epoch.
        # Convert tensors to Python lists ONCE per batch — the original
        # called .tolist() inside the per-sample loop (quadratic work).
        preds = output.detach().flatten().tolist()
        reals = batch_y.tolist()
        for pred, real in zip(preds, reals):
            writer.add_scalars("train_value_{}".format(epoch),
                               {
                                   'pre': pred,
                                   'real': real
                               },
                               event_count)
            event_count += 1

    # Per-epoch histograms of every parameter's gradient and value.
    for name, param in boston.named_parameters():
        writer.add_histogram(name + '_grad', param.grad, epoch)
        writer.add_histogram(name + '_data', param, epoch)

    scheduler.step()  # advance the LR schedule once per epoch

    '''evaluate'''
    boston.eval()
    with torch.no_grad():
        # test_loader yields a single full-size batch.
        for step, (batch_x, batch_y) in enumerate(test_loader):
            # Forward pass only — gradients disabled.
            output = boston(batch_x)
            loss = loss_func(output.flatten(), batch_y.flatten())

            # Mean test-set error for this epoch.
            writer.add_scalar('error', loss.item(), epoch)
            # Predicted vs. real value for each test sample
            # (lists hoisted out of the loop, as above).
            # NOTE(review): tag 'rel' is inconsistent with the training
            # chart's 'real'; kept byte-identical to preserve output.
            preds = output.flatten().tolist()
            reals = batch_y.tolist()
            for i, (pred, real) in enumerate(zip(preds, reals)):
                writer.add_scalars('test_value_{}'.format(epoch),
                                   {
                                       'pre': pred,
                                       'rel': real
                                   },
                                   i)

# Flush and close the event file (the original leaked the writer).
writer.close()
