# coding=utf-8
# Hyper-parameters for the training demo below.
epochs = 5  # number of full passes over the training data
batch_size = 64  # samples per training step
learning_rate = 1e-3  # optimizer step size

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

# Loss functions: several alternatives are listed below;
# SoftmaxCrossEntropyWithLogits is the one actually used.
# (The original code constructed nn.L1Loss() and immediately discarded it —
# that dead assignment is now commented out like the other alternatives.)
# loss = nn.L1Loss()
loss = nn.SoftmaxCrossEntropyWithLogits()
# loss = nn.MSELoss()
# loss = nn.SmoothL1Loss()

# Demo: evaluate the chosen loss on a pair of hand-made tensors.
output_data = Tensor(np.array([[1, 2, 5], [2, 3, 4]]).astype(np.float32))
target_data = Tensor(np.array([[0, 2, 5], [3, 1, 1]]).astype(np.float32))
print(loss(output_data, target_data))

# Optimizer
# Example (requires a network instance `net`):
# optim = nn.Momentum(net.trainable_params(), 0.1, 0.9)

# Training
# Model training generally consists of four steps:
# 1. Define the neural network.
# 2. Build the dataset.
# 3. Define the hyper-parameters, loss function and optimizer.
# 4. Run training with the chosen number of epochs and the dataset.


from dataset import download_dataset
from lenet import LeNet5

download_dataset("https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/cifar-10-binary.tar.gz",
                 "../../datasets")
net = LeNet5()
epochs = 5
batch_size = 64
learning_rate = 1e-3

import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as CV
from mindspore import nn, Tensor, Model
from mindspore import dtype as mstype
from mindspore.train.callback import LossMonitor


DATA_DIR = "../../datasets/cifar-10-batches-bin"
# 构建数据集
sampler = ds.SequentialSampler(num_samples=128)
dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler)

# 数据类型转换
type_cast_op_image = C.TypeCast(mstype.float32)
type_cast_op_label = C.TypeCast(mstype.int32)
HWC2CHW = CV.HWC2CHW()
dataset = dataset.map(operations=[type_cast_op_image, HWC2CHW], input_columns="image")
dataset = dataset.map(operations=type_cast_op_label, input_columns="label")
dataset = dataset.batch(batch_size)

# Define the hyper-parameters, loss function and optimizer.
optim = nn.Momentum(net.trainable_params(), learning_rate, 0.9)
# sparse=True: labels are class indices (int32), not one-hot vectors.
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
cb = LossMonitor()

# Run training with the chosen number of epochs and the dataset,
# logging the loss every step via the LossMonitor callback.
model = Model(net, loss_fn=loss, optimizer=optim)
model.train(epoch=epochs, train_dataset=dataset, callbacks=[cb])
