# High-level wrapper: Model
# https://www.mindspore.cn/tutorials/zh-CN/master/advanced/model.html

import mindspore
from mindspore import nn
from mindspore.dataset import vision, transforms
from mindspore.dataset import MnistDataset
from mindspore.train import Model, CheckpointConfig, ModelCheckpoint, LossMonitor

# For simple scenarios, a Model can be defined by directly specifying the forward
# network `network`, loss function `loss_fn`, optimizer `optimizer`, and metrics `metrics`.

# Path to the locally extracted MNIST dataset (expected to contain train/ and test/ subdirs).
local_dir = '../../datasets/MNIST_Data'


# Download data from open datasets
# from download import download
# url = "https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/MNIST_Data.zip"
# path = download(url, "./", kind="zip", replace=True)

def datapipe(path, batch_size):
    """Build a batched MNIST pipeline.

    Images are rescaled to [0, 1], normalized with MNIST mean/std, and
    converted from HWC to CHW layout; labels are cast to int32.
    """
    dataset = MnistDataset(path)
    dataset = dataset.map(
        [
            vision.Rescale(1.0 / 255.0, 0),
            vision.Normalize(mean=(0.1307,), std=(0.3081,)),
            vision.HWC2CHW(),
        ],
        'image',
    )
    dataset = dataset.map(transforms.TypeCast(mindspore.int32), 'label')
    return dataset.batch(batch_size)


# Batched (batch_size=64) pipelines over the train and test splits.
train_dataset = datapipe(local_dir + '/train', 64)
test_dataset = datapipe(local_dir + '/test', 64)


# Define model
class Network(nn.Cell):
    """MLP classifier for flattened 28x28 MNIST images: 784 -> 512 -> 512 -> 10 logits."""

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        # Two hidden Dense+ReLU stages followed by the 10-way output layer.
        layers = []
        in_dim = 28 * 28
        for hidden_dim in (512, 512):
            layers.append(nn.Dense(in_dim, hidden_dim))
            layers.append(nn.ReLU())
            in_dim = hidden_dim
        layers.append(nn.Dense(in_dim, 10))
        self.dense_relu_sequential = nn.SequentialCell(*layers)

    def construct(self, x):
        """Flatten the image batch and return raw (unnormalized) class logits."""
        return self.dense_relu_sequential(self.flatten(x))


model = Network()

# Define the loss function and optimizer.
# Instantiate loss function and optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = nn.SGD(model.trainable_params(), 1e-2)

# Training & checkpointing: save a checkpoint once per epoch
# (save_checkpoint_steps equals the number of steps in one epoch).
steps_per_epoch = train_dataset.get_dataset_size()
config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch)

ckpt_callback = ModelCheckpoint(prefix="mnist", directory=local_dir + "/checkpoint", config=config)
# Print the loss once per epoch (every steps_per_epoch steps).
loss_callback = LossMonitor(steps_per_epoch)

from mindspore.train import TimeMonitor

# TimeMonitor reports timing information during training.
time_monitor = TimeMonitor()

import time
import mindspore as ms


class StopTimeMonitor(ms.train.Callback):
    """Callback that requests a training stop once a wall-clock budget is exhausted."""

    def __init__(self, run_time):
        """Store the allowed training duration (seconds)."""
        super(StopTimeMonitor, self).__init__()
        self.run_time = run_time

    def on_train_begin(self, run_context):
        """Record the start-of-training timestamp on the shared callback params."""
        cb_params = run_context.original_args()
        cb_params.init_time = time.time()
        print(f"Begin training, time is: {cb_params.init_time}")

    def on_train_step_end(self, run_context):
        """After each step, stop training if the time budget has been exceeded."""
        cb_params = run_context.original_args()
        cur_time = time.time()
        # Still within budget: nothing to do.
        if (cur_time - cb_params.init_time) <= self.run_time:
            return
        epoch_num = cb_params.cur_epoch_num
        step_num = cb_params.cur_step_num
        loss = cb_params.net_outputs
        print(f"End training, time: {cur_time}, epoch: {epoch_num}, step: {step_num}, loss:{loss}")
        run_context.request_stop()


import mindspore as ms


# Custom callback: save a checkpoint whenever the loss drops below a threshold.
class SaveCkptMonitor(ms.train.Callback):
    """Callback that saves a checkpoint whenever the step loss falls below a threshold."""

    def __init__(self, loss):
        """Store the loss threshold below which checkpoints are saved."""
        super(SaveCkptMonitor, self).__init__()
        self.loss = loss  # loss threshold

    def on_train_step_end(self, run_context):
        """At each step end, save the network if the current loss is under the threshold."""
        cb_params = run_context.original_args()
        cur_loss = cb_params.net_outputs.asnumpy()  # current step loss as a numpy value

        # Save a checkpoint when the loss drops below the threshold.
        # (The original comment claimed training stops here; it does not — it only saves.)
        if cur_loss < self.loss:
            # File name encodes the current epoch and step.
            # NOTE(review): path is hard-coded to ./checkpoint, unlike ModelCheckpoint
            # above which uses local_dir — confirm the directory exists at run time.
            file_name = f"./checkpoint/{cb_params.cur_epoch_num}_{cb_params.cur_step_num}.ckpt"
            # Persist the full training network.
            ms.save_checkpoint(save_obj=cb_params.train_network, ckpt_file_name=file_name)
            print("Saved checkpoint, loss:{:8.7f}, current step num:{:4}.".format(cur_loss, cb_params.cur_step_num))


# model.fit conveniently runs training with periodic evaluation;
# LossMonitor logs the loss as training progresses.
trainer = Model(model, loss_fn=loss_fn, optimizer=optimizer, metrics={'accuracy'})

# Train for 10 epochs, evaluating on the test set each epoch.
# Callbacks: per-epoch checkpointing, loss/time logging, a 4-second
# wall-clock stop, and threshold-based checkpointing at loss < 0.05.
# (Removed: `datasize = train_dataset.get_dataset_size()` — the value was
# never used; `steps_per_epoch` above already holds it.)
trainer.fit(10, train_dataset, test_dataset,
            callbacks=[ckpt_callback, loss_callback, time_monitor,
                       StopTimeMonitor(4), SaveCkptMonitor(0.05)])

# Final evaluation on the test set; prints the metrics dict (e.g. {'accuracy': ...}).
acc = trainer.eval(test_dataset)
print(acc)