# coding=utf-8
from resnet import resnet50
import mindspore.nn as nn
import mindspore.dataset as ds
import mindspore.dataset.vision as vision
import os
import time
import matplotlib
import mindspore as ms
# from modelscope.msdatasets import MsDataset
from mindspore import train
import logging

ms.set_context(mode=ms.GRAPH_MODE)

logging.basicConfig(level=logging.INFO, filename="train.log",
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')

# --- training hyperparameters ---
batch_size = 32    # samples per mini-batch
image_size = 224   # spatial size of the training images
num_epochs = 15    # number of training epochs
lr = 0.001         # learning rate
momentum = 0.9     # optimizer momentum
workers = 4        # number of parallel data-loading workers

# matplotlib.use('TkAgg')

# Dataset directory paths (remote-loading alternative kept for reference):
# train_dataset = MsDataset.load('AiguLiu/plants', subset_name='swift', split='train', custom_cfg=None)
# eval_dataset = MsDataset.load('AiguLiu/plants', subset_name='swift', split='validation', custom_cfg=None)
# test_dataset = MsDataset.load('AiguLiu/plants', subset_name='swift', split='test', custom_cfg=None)

# TODO: change this to the local run directory
local_dir = "/Users/xuyi/.cache/modelscope/hub/datasets/AiguLiu/plants/master/data_files/extracted"
data_path_train = f"{local_dir}/25bdd151b710f82c3e5b12dddfae083561435bb0165ba8261961405ef92b93da/train/"
data_path_val = f"{local_dir}/cd817a0b1c4af860d3fc6ab52546b528ac2525614b4c4abc6ec11466c449424b/val"
data_path_test = f"{local_dir}/0d19feef58332241f0c6ea9c7b438161d0684ec54c9dfb715eb4cba45fd1573d/test"


# 创建训练数据集
def create_dataset_plant(dataset_path, usage, batch_size=batch_size,
                         image_size=image_size, num_parallel_workers=workers):
    """Load an image-folder dataset and attach its preprocessing pipeline.

    Args:
        dataset_path (str): root directory laid out as one sub-folder per class.
        usage (str): "train" selects the random-augmentation pipeline; any
            other value ("val"/"test") selects the deterministic
            resize + center-crop pipeline.
        batch_size (int): samples per batch; defaults to the module-level config.
        image_size (int): output spatial size; defaults to the module-level config.
        num_parallel_workers (int): parallel workers for loading and mapping;
            defaults to the module-level config.

    Returns:
        A batched mindspore dataset yielding (image, label) pairs.
    """
    data_set = ds.ImageFolderDataset(dataset_path,
                                     num_parallel_workers=num_parallel_workers,
                                     shuffle=True)

    # ImageNet channel statistics, scaled to the [0, 255] pixel range.
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
    # Extra margin added before center-cropping in the eval pipeline.
    scale = 32

    if usage == "train":
        # Training pipeline: random resized crop (which also decodes the
        # raw bytes) plus a horizontal flip.
        trans = [
            vision.RandomCropDecodeResize(size=image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            vision.RandomHorizontalFlip(prob=0.5),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]
    else:
        # Inference pipeline: deterministic decode, resize and center crop.
        trans = [
            vision.Decode(),
            vision.Resize(image_size + scale),
            vision.CenterCrop(image_size),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]

    # Apply the transform pipeline to the image column, then batch.
    data_set = data_set.map(operations=trans, input_columns='image',
                            num_parallel_workers=num_parallel_workers)
    data_set = data_set.batch(batch_size)

    return data_set


# Build the train / val / test pipelines and record their per-epoch step counts.
dataset_train = create_dataset_plant(data_path_train, "train")
step_size_train = dataset_train.get_dataset_size()

dataset_val = create_dataset_plant(data_path_val, "val")
step_size_val = dataset_val.get_dataset_size()

dataset_test = create_dataset_plant(data_path_test, "test")
step_size_test = dataset_test.get_dataset_size()

# Tuple iterators (used by the manual training loop kept in comments below).
data_loader_train = dataset_train.create_tuple_iterator(num_epochs=num_epochs)
data_loader_val = dataset_val.create_tuple_iterator(num_epochs=num_epochs)

# Location of the best checkpoint produced during fine-tuning.
best_ckpt_dir = "../../model/res50-plant/BestCheckpoint"
best_ckpt_path = f"{best_ckpt_dir}/resnet50-best-freezing-param.ckpt"

# Fixed-feature transfer learning: freeze every layer except the new head by
# setting requires_grad = False, so backprop only updates the classifier.
net_work = resnet50(pretrained=True)

# Width of the fully connected input layer.
in_channels = net_work.fc.in_channels
# New classification head: 30 output classes.
head = nn.Dense(in_channels, 30)
# Replace the fully connected layer.
net_work.fc = head

# Average-pooling layer with a 7x7 kernel (the feature-map size ResNet-50
# produces for 224x224 inputs).
avg_pool = nn.AvgPool2d(kernel_size=7)
# NOTE(review): assumes the local resnet implementation exposes this layer as
# `avg_pool` — verify against resnet.py.
net_work.avg_pool = avg_pool

# Freeze all parameters except the new fully connected layer.
for param in net_work.get_parameters():
    if param.name not in ["fc.weight", "fc.bias"]:
        param.requires_grad = False

# Optimizer and loss function.
# FIX: the optimizer previously hard-coded momentum=0.5, silently ignoring the
# module-level `momentum` config (0.9). Use the configured value so the
# declared hyperparameter actually takes effect.
opt = nn.Momentum(params=net_work.trainable_params(), learning_rate=lr, momentum=momentum)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')


def forward_fn(inputs, targets):
    """Run one forward pass and return the classification loss for the batch."""
    predictions = net_work(inputs)
    return loss_fn(predictions, targets)


# grad_fn = ms.value_and_grad(forward_fn, None, opt.parameters)
#
#
# def train_step(inputs, targets):
#     loss, grads = grad_fn(inputs, targets)
#     opt(grads)
#     return loss
#
#
# # 实例化模型
# model = train.Model(net_work, loss_fn, opt, metrics={"Accuracy": train.Accuracy()})


# 1.9.0 start
from mindspore.train import Model, CheckpointConfig, ModelCheckpoint, LossMonitor

steps_per_epoch = dataset_train.get_dataset_size()
# Save one checkpoint per epoch.
config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch)

ckpt_callback = ModelCheckpoint(prefix="res50_plant", directory="../../model/res50-plant-1.9", config=config)
loss_callback = LossMonitor(steps_per_epoch)

# Stage 1: train only the classifier head (backbone frozen above).
net_work.set_train()
trainer = Model(net_work, loss_fn=loss_fn, optimizer=opt, metrics={'accuracy'})
trainer.fit(1, dataset_train, dataset_val, callbacks=[ckpt_callback, loss_callback])

# Stage 2: fine-tune the whole network.
# FIX: `net_work.set_grad(True)` does not unfreeze anything — the frozen
# parameters still have requires_grad=False, and the optimizer above was built
# from trainable_params() when only the fc layer was trainable, so stage 2
# would silently keep training just the head. Unfreeze every parameter and
# rebuild the optimizer and Model so all weights are updated.
logging.info("解除参数冻结")
for param in net_work.get_parameters():
    param.requires_grad = True
# Same hyperparameters as the original optimizer (momentum 0.5).
opt = nn.Momentum(params=net_work.trainable_params(), learning_rate=lr, momentum=0.5)
trainer = Model(net_work, loss_fn=loss_fn, optimizer=opt, metrics={'accuracy'})
trainer.fit(1, dataset_train, dataset_val, callbacks=[ckpt_callback, loss_callback])

# Evaluate on the held-out test split.
net_work.set_train(False)
eval_result = trainer.eval(dataset_test)
print(eval_result)

# 1.9.0 end
# logging.info("开始微调循环 ...")
#
# best_acc = 0
#
# for epoch in range(num_epochs):
#     losses = []
#     net_work.set_train()
#
#     epoch_start = time.time()
#
#     # 为每轮训练读入数据
#     for i, (images, labels) in enumerate(data_loader_train):
#         labels = labels.astype(ms.int32)
#         loss = train_step(images, labels)
#         losses.append(loss)
#         if i % 10 == 0:
#             logging.info("Epoch: [%3d/%3d/%3d], Average Train Loss: [%5.3f]" % (
#                 i, epoch + 1, num_epochs, sum(losses) / len(losses)
#             ))
#
#     # 每个epoch结束后，验证准确率
#     acc = model.eval(dataset_val)['Accuracy']
#
#     epoch_end = time.time()
#     epoch_seconds = (epoch_end - epoch_start) * 1000
#     step_seconds = epoch_seconds / step_size_train
#
#     logging.info("-" * 20)
#     logging.info("Epoch: [%3d/%3d], Average Train Loss: [%5.3f], Accuracy: [%5.3f]" % (
#         epoch + 1, num_epochs, sum(losses) / len(losses), acc
#     ))
#     logging.info("epoch time: %5.3f ms, per step time: %5.3f ms" % (
#         epoch_seconds, step_seconds
#     ))
#     if epoch > num_epochs - 10:
#         logging.info("解除参数冻结")
#         net_work.set_grad(True)
#
#     if acc > best_acc:
#         best_acc = acc
#         if not os.path.exists(best_ckpt_dir):
#             os.mkdir(best_ckpt_dir)
#         ms.save_checkpoint(net_work, best_ckpt_path)
#
# logging.info("=" * 80)
# logging.info("End of validation the best Accuracy is: %5.3f, save the best ckpt file in %s", best_acc, best_ckpt_path)
#
# ## todo 测试数据集
# acc = model.eval(dataset_test)['Accuracy']
# logging.info("test acc:", acc)
