"""
@Author  : 吕申凯
@Time    : 2022/9/20 16:17
@File    : model_sava.py
@Function: 
"""

from sklearn.datasets import load_boston
import torch.nn as nn
import torch
import torch.utils.data as Data
from torch.utils.tensorboard import SummaryWriter
import pandas as pd
import numpy as np


class Boston(nn.Module):
    """
        Boston house-price regression network.

        Three fully-connected hidden layers with ReLU activations,
        followed by a single-output regression head.

        Fix: the original stacked Linear layers with no activation in
        between, which mathematically collapses to one affine map; the
        ReLU layers restore the intended non-linear capacity.
    """

    def __init__(self, input_dim):
        """
        :param input_dim: number of input features (13 for the Boston dataset)
        """
        super(Boston, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(input_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 128),
            nn.ReLU(),
            nn.Linear(128, 32),
            nn.ReLU(),
        )
        self.regression = nn.Linear(32, 1)

    def forward(self, x):
        """
            Forward pass.

        :param x: float tensor of shape (batch, input_dim)
        :return: predicted prices, shape (batch, 1)
        """
        out = self.fc(x)
        out = self.regression(out)
        return out


def get_data(batch_size=50, train_fraction=0.9):
    """
        Load the Boston housing data and wrap it in DataLoaders.

    NOTE(review): ``load_boston`` was deprecated in scikit-learn 1.0 and
    removed in 1.2; running this requires scikit-learn < 1.2.

    :param batch_size: mini-batch size for the training loader (default 50,
        matching the original hard-coded value)
    :param train_fraction: fraction of samples used for training; the
        remainder becomes the test set (default 0.9)
    :return: (train_loader, test_loader) DataLoader objects
    """
    # Fetch features X and target prices y as numpy arrays.
    X, y = load_boston(return_X_y=True)

    # Sequential train/test split, converted to float32 tensors.
    split = int(len(X) * train_fraction)
    train_X = torch.tensor(X[:split], dtype=torch.float32)
    test_X = torch.tensor(X[split:], dtype=torch.float32)
    train_Y = torch.tensor(y[:split], dtype=torch.float32)
    test_Y = torch.tensor(y[split:], dtype=torch.float32)

    # Wrap tensors as TensorDatasets.
    train_dataset = Data.TensorDataset(train_X, train_Y)
    test_dataset = Data.TensorDataset(test_X, test_Y)

    # Mini-batched, shuffled training loader.
    train_loader = Data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,  # reshuffle training samples each epoch
        # num_workers=2  # multi-process data loading
    )
    # The test loader yields the whole test set as a single batch.
    test_loader = Data.DataLoader(
        dataset=test_dataset,
        batch_size=len(test_dataset),
    )

    return train_loader, test_loader


def predict(model, phase, dataload, loss_func, optimizer=None, scheduler=None):
    """
        Run one epoch of training or evaluation.

        Fixes over the original: gradients are disabled during evaluation
        (the original built an autograd graph even in eval), the epoch's
        mean loss is returned instead of being discarded (the original
        returned None, so callers that ignore the return are unaffected),
        and ``scheduler.step()`` is guarded so the default ``scheduler=None``
        no longer crashes the training phase.

    :param model: the network to train or evaluate
    :param phase: 'train' to optimize; any other value evaluates only
    :param dataload: DataLoader yielding (features, targets) batches
    :param loss_func: loss function, e.g. nn.MSELoss()
    :param optimizer: optimizer; required when phase == 'train'
    :param scheduler: optional LR scheduler, stepped once per training epoch
    :return: mean per-batch loss as a float, or None for an empty loader
    """
    is_train = phase == 'train'
    if is_train:
        model.train()
    else:
        model.eval()

    total_loss = 0.0
    n_batches = 0
    # Only build the autograd graph when we actually backprop.
    with torch.set_grad_enabled(is_train):
        for batch_x, batch_y in dataload:
            # Forward pass
            output = model(batch_x)
            # Flatten both sides so (batch, 1) predictions match (batch,) targets.
            loss = loss_func(output.flatten(), batch_y.flatten())
            if is_train:
                optimizer.zero_grad()   # clear stale gradients
                loss.backward()         # backpropagate
                optimizer.step()        # update parameters
            total_loss += loss.item()
            n_batches += 1

    if is_train and scheduler is not None:
        scheduler.step()  # per-epoch learning-rate decay

    return total_loss / n_batches if n_batches else None


if __name__ == '__main__':
    import os

    # Build train/test DataLoaders.
    train_data, test_data = get_data()
    # Instantiate the network (the Boston dataset has 13 features).
    boston = Boston(input_dim=13)
    # Mean-squared-error loss for regression.
    loss_func = nn.MSELoss()
    # Adam optimizer.
    optimizer = torch.optim.Adam(boston.parameters(), lr=0.0001)
    # Learning rate decays by 10x every 10 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # Training loop.
    for epoch in range(25):
        # One training epoch
        predict(model=boston, phase='train', dataload=train_data,
                loss_func=loss_func, optimizer=optimizer, scheduler=scheduler)
        # One evaluation pass
        predict(model=boston, phase='test', dataload=test_data, loss_func=loss_func)

    # torch.save does not create missing directories — ensure it exists
    # (the original crashed here on a fresh checkout without ./pkls).
    os.makedirs('./pkls', exist_ok=True)
    path_model = './pkls/model.pkl'  # whole-model checkpoint
    path_state_dict = './pkls/state_dict.pkl'  # parameters-only checkpoint

    # Save the entire model (pickles the class; fragile across code changes).
    torch.save(boston, path_model)

    # Save only the parameters (the recommended checkpoint format).
    net_state_dict = boston.state_dict()
    torch.save(net_state_dict, path_state_dict)

    # Reload parameters into the existing model instance.
    net_state_dict = torch.load(path_state_dict)
    boston.load_state_dict(net_state_dict)

    # Reload the entire model object.
    # NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
    boston = torch.load(path_model)
